Diffstat (limited to 'drivers/net/team')
-rw-r--r--  drivers/net/team/team.c                    86
-rw-r--r--  drivers/net/team/team_mode_loadbalance.c    3
-rw-r--r--  drivers/net/team/team_mode_roundrobin.c     3
3 files changed, 56 insertions(+), 36 deletions(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b3051052f3ad..bff7e0b0b4e7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -525,31 +525,26 @@ static void team_set_no_mode(struct team *team)
team->mode = &__team_no_mode;
}
-static void __team_adjust_ops(struct team *team, int en_port_count)
+static void team_adjust_ops(struct team *team)
{
/*
* To avoid checks in rx/tx skb paths, ensure here that non-null and
* correct ops are always set.
*/
- if (!en_port_count || !team_is_mode_set(team) ||
+ if (!team->en_port_count || !team_is_mode_set(team) ||
!team->mode->ops->transmit)
team->ops.transmit = team_dummy_transmit;
else
team->ops.transmit = team->mode->ops->transmit;
- if (!en_port_count || !team_is_mode_set(team) ||
+ if (!team->en_port_count || !team_is_mode_set(team) ||
!team->mode->ops->receive)
team->ops.receive = team_dummy_receive;
else
team->ops.receive = team->mode->ops->receive;
}
-static void team_adjust_ops(struct team *team)
-{
- __team_adjust_ops(team, team->en_port_count);
-}
-
/*
* We can benefit from the fact that it's ensured no port is present
* at the time of mode change. Therefore no packets are in fly so there's no
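For context, the dummy handlers that team_adjust_ops() falls back to whenever team->en_port_count is zero, no mode is set, or the mode lacks a handler look roughly like this elsewhere in team.c (paraphrased, not part of this hunk):

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
        /* No usable port or mode handler: drop the packet. */
        dev_kfree_skb_any(skb);
        return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
                                              struct team_port *port,
                                              struct sk_buff *skb)
{
        /* Hand the frame back to the normal receive path unchanged. */
        return RX_HANDLER_ANOTHER;
}

Reading team->en_port_count directly removes the need for the __team_adjust_ops() wrapper, whose only job was to let team_port_disable() pass a pre-decremented count.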
@@ -725,9 +720,9 @@ static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
static void __team_queue_override_port_del(struct team *team,
struct team_port *port)
{
+ if (!port->queue_id)
+ return;
list_del_rcu(&port->qom_list);
- synchronize_rcu();
- INIT_LIST_HEAD(&port->qom_list);
}
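Dropping synchronize_rcu() and the INIT_LIST_HEAD() here is possible because the only lockless consumer walks the override lists with list_for_each_entry_rcu() in the transmit path: list_del_rcu() keeps the unlinked entry's forward pointer valid for readers already on the list, and the backing struct team_port is only freed via kfree_rcu() (see the team_port_del() hunk below). The reader side in team.c looks roughly like this (paraphrased, not part of this hunk):

static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
        struct list_head *qom_list;
        struct team_port *port;

        if (!team->queue_override_enabled || !skb->queue_mapping)
                return false;
        qom_list = __team_get_qom_list(team, skb->queue_mapping);
        list_for_each_entry_rcu(port, qom_list, qom_list) {
                /* try ports in priority order until one accepts the skb */
                if (!team_dev_queue_xmit(team, port, skb))
                        return true;
        }
        return false;
}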
static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
@@ -749,9 +744,8 @@ static void __team_queue_override_port_add(struct team *team,
struct list_head *qom_list;
struct list_head *node;
- if (!port->queue_id || !team_port_enabled(port))
+ if (!port->queue_id)
return;
-
qom_list = __team_get_qom_list(team, port->queue_id);
node = qom_list;
list_for_each_entry(cur, qom_list, qom_list) {
@@ -768,7 +762,7 @@ static void __team_queue_override_enabled_check(struct team *team)
bool enabled = false;
list_for_each_entry(port, &team->port_list, list) {
- if (!list_empty(&port->qom_list)) {
+ if (port->queue_id) {
enabled = true;
break;
}
@@ -780,14 +774,44 @@ static void __team_queue_override_enabled_check(struct team *team)
team->queue_override_enabled = enabled;
}
-static void team_queue_override_port_refresh(struct team *team,
- struct team_port *port)
+static void team_queue_override_port_prio_changed(struct team *team,
+ struct team_port *port)
{
+ if (!port->queue_id || !team_port_enabled(port))
+ return;
__team_queue_override_port_del(team, port);
__team_queue_override_port_add(team, port);
__team_queue_override_enabled_check(team);
}
+static void team_queue_override_port_change_queue_id(struct team *team,
+ struct team_port *port,
+ u16 new_queue_id)
+{
+ if (team_port_enabled(port)) {
+ __team_queue_override_port_del(team, port);
+ port->queue_id = new_queue_id;
+ __team_queue_override_port_add(team, port);
+ __team_queue_override_enabled_check(team);
+ } else {
+ port->queue_id = new_queue_id;
+ }
+}
+
+static void team_queue_override_port_add(struct team *team,
+ struct team_port *port)
+{
+ __team_queue_override_port_add(team, port);
+ __team_queue_override_enabled_check(team);
+}
+
+static void team_queue_override_port_del(struct team *team,
+ struct team_port *port)
+{
+ __team_queue_override_port_del(team, port);
+ __team_queue_override_enabled_check(team);
+}
+
/****************
* Port handling
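Taken together, the four new helpers keep one invariant: a port sits on a qom_list exactly while it is enabled and has a nonzero queue_id, and team->queue_override_enabled is re-evaluated after every membership change. As a purely illustrative predicate (not in the patch):

/* Illustration only: the qom_list membership invariant. */
static bool team_port_on_qom_list(struct team_port *port)
{
        return port->queue_id && team_port_enabled(port);
}

team_port_enable()/team_port_disable() establish and tear down membership, team_queue_override_port_change_queue_id() re-sorts only when the port is enabled, and team_queue_override_port_prio_changed() re-sorts only when the port is actually on a list.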
@@ -819,7 +843,7 @@ static void team_port_enable(struct team *team,
hlist_add_head_rcu(&port->hlist,
team_port_index_hash(team, port->index));
team_adjust_ops(team);
- team_queue_override_port_refresh(team, port);
+ team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
}
@@ -848,14 +872,9 @@ static void team_port_disable(struct team *team,
hlist_del_rcu(&port->hlist);
__reconstruct_port_hlist(team, port->index);
port->index = -1;
- team_queue_override_port_refresh(team, port);
- __team_adjust_ops(team, team->en_port_count - 1);
- /*
- * Wait until readers see adjusted ops. This ensures that
- * readers never see team->en_port_count == 0
- */
- synchronize_rcu();
team->en_port_count--;
+ team_queue_override_port_del(team, port);
+ team_adjust_ops(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
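The old code had to call __team_adjust_ops() with en_port_count - 1 and then synchronize_rcu() before the real decrement so that RCU readers never computed a modulo by zero. That is no longer necessary because the tx modes (see the two hunks at the end of this diff) go through a helper that tolerates a transiently stale or zero count, roughly like this (paraphrased from include/linux/if_team.h; the exact body may differ):

static inline int team_num_to_port_index(struct team *team, int num)
{
        int en_port_count = ACCESS_ONCE(team->en_port_count);

        if (unlikely(!en_port_count))
                return 0;
        return num % en_port_count;
}

If the resulting index no longer maps to an enabled port, team_get_port_by_index_rcu() returns NULL and the modes simply drop the packet, so readers may observe the decrement before or after the ops switch.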
@@ -1163,8 +1182,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
team_port_set_orig_dev_addr(port);
dev_set_mtu(port_dev, port->orig.mtu);
- synchronize_rcu();
- kfree(port);
+ kfree_rcu(port, rcu);
netdev_info(dev, "Port device %s removed\n", portname);
__team_compute_features(team);
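kfree_rcu() queues the free to run after an RCU grace period instead of making the writer block in synchronize_rcu(); it relies on a struct rcu_head member, here named rcu, presumably added to struct team_port in include/linux/if_team.h by the same series (not visible in this diff, which is limited to drivers/net/team). Side by side, for illustration only:

        /* before: writer stalls for a full grace period, then frees */
        synchronize_rcu();
        kfree(port);

        /* after: free is deferred past the grace period, writer continues */
        kfree_rcu(port, rcu);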
@@ -1259,9 +1277,12 @@ static int team_priority_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
+ s32 priority = ctx->data.s32_val;
- port->priority = ctx->data.s32_val;
- team_queue_override_port_refresh(team, port);
+ if (port->priority == priority)
+ return 0;
+ port->priority = priority;
+ team_queue_override_port_prio_changed(team, port);
return 0;
}
@@ -1278,17 +1299,16 @@ static int team_queue_id_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
struct team_port *port = ctx->info->port;
+ u16 new_queue_id = ctx->data.u32_val;
- if (port->queue_id == ctx->data.u32_val)
+ if (port->queue_id == new_queue_id)
return 0;
- if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+ if (new_queue_id >= team->dev->real_num_tx_queues)
return -EINVAL;
- port->queue_id = ctx->data.u32_val;
- team_queue_override_port_refresh(team, port);
+ team_queue_override_port_change_queue_id(team, port, new_queue_id);
return 0;
}
-
static const struct team_option team_options[] = {
{
.name = "mode",
@@ -2648,7 +2668,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
static int team_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *dev = (struct net_device *) ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct team_port *port;
port = team_port_get_rtnl(dev);
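Notifier callbacks no longer receive a bare struct net_device pointer; ptr now points to a struct netdev_notifier_info and the device must be fetched with the helper, roughly (paraphrased from include/linux/netdevice.h):

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
        return info->dev;
}

The old cast would now reinterpret the info structure as a net_device, so this is a required adaptation rather than a cleanup.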
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index cdc31b5ea15e..829a9cd2b4da 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -112,9 +112,8 @@ static struct team_port *lb_hash_select_tx_port(struct team *team,
struct sk_buff *skb,
unsigned char hash)
{
- int port_index;
+ int port_index = team_num_to_port_index(team, hash);
- port_index = hash % team->en_port_count;
return team_get_port_by_index_rcu(team, port_index);
}
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 472623f8ce3d..53665850b59e 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -30,7 +30,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
struct team_port *port;
int port_index;
- port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
+ port_index = team_num_to_port_index(team,
+ rr_priv(team)->sent_packets++);
port = team_get_port_by_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
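Both tx modes now share team_num_to_port_index() instead of open-coding the modulo on team->en_port_count, which is what previously forced team_port_disable() to keep the count nonzero across a grace period. For round-robin, the packet counter lives in the mode's private area, roughly (paraphrased from team_mode_roundrobin.c):

struct rr_priv {
        unsigned int sent_packets;
};

static struct rr_priv *rr_priv(struct team *team)
{
        return (struct rr_priv *) &team->mode_priv;
}

sent_packets is bumped without locking; an occasional lost increment under concurrency only perturbs the port rotation, which is acceptable for best-effort round-robin.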