author	Jiri Pirko <jiri@resnulli.us>	2015-01-14 18:15:30 +0100
committer	David S. Miller <davem@davemloft.net>	2015-01-14 16:53:57 -0500
commit	b0d11b42785b70e19bc6a3122eead3f7969a7589 (patch)
tree	06ca980eca49523be349e19fdbd50c2e090d7f6f /drivers/net
parent	1ba398041f5b5a15456ea20a9ba3ff80b6a4e7d1 (diff)
team: avoid possible underflow of count_pending value for notify_peers and mcast_rejoin
This patch fixes a race condition that can leave count_pending set
to -1, which results in an unwanted large burst of ARP messages
(in the case of "notify peers").
Consider the following scenario:

count_pending == 2
   CPU0                                        CPU1
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 1)
  schedule_delayed_work
                                             team_notify_peers
                                               atomic_add (adding 1 to count_pending)
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 1)
  schedule_delayed_work
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 0)
  schedule_delayed_work
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to -1)
Fix this race by using atomic_dec_if_positive() - that will prevent
count_pending from ever dropping below 0.
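
For reference, here is a minimal userspace sketch of the guarantee
atomic_dec_if_positive() provides (the kernel's version lives in
<linux/atomic.h>; this C11 analogue is for illustration only):

#include <stdatomic.h>

/* Illustrative analogue of the kernel's atomic_dec_if_positive():
 * decrement *v only when the result stays >= 0, and return the
 * would-be new value, which is negative when no decrement happened. */
static int dec_if_positive(atomic_int *v)
{
	int c = atomic_load(v);

	for (;;) {
		int dec = c - 1;

		if (dec < 0)	/* counter already at 0: refuse to underflow */
			return dec;
		/* On failure, c is reloaded with the current value and we retry. */
		if (atomic_compare_exchange_weak(v, &c, dec))
			return dec;
	}
}

Applied to count_pending, a negative return value means another path
already consumed the last pending notification, 0 means this run is the
last one, and a positive value means further runs remain - exactly the
three cases the patch below distinguishes.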
Fixes: fc423ff00df3a1955441 ("team: add peer notification")
Fixes: 492b200efdd20b8fcfd ("team: add support for sending multicast rejoins")
Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Jiri Benc <jbenc@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/team/team.c	16
1 file changed, 14 insertions, 2 deletions
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 93e224217e24..f7ff493f1e73 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
 	struct team *team;
+	int val;
 
 	team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
 		schedule_delayed_work(&team->notify_peers.dw, 0);
 		return;
 	}
+	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
 	rtnl_unlock();
-	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+	if (val)
 		schedule_delayed_work(&team->notify_peers.dw,
 				      msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
 	struct team *team;
+	int val;
 
 	team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
 		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
 		return;
 	}
+	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
 	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
 	rtnl_unlock();
-	if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+	if (val)
 		schedule_delayed_work(&team->mcast_rejoin.dw,
 				      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
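
For readability, this is how team_notify_peers_work() reads with the
patch applied, reconstructed from the hunks above (the rtnl_trylock()
check is context the hunks do not show; team_mcast_rejoin_work() changes
identically, only with mcast_rejoin and NETDEV_RESEND_IGMP):

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	/* Consume one pending notification, but never go below zero. */
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		/* A racing path already consumed the last one; do nothing. */
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)	/* notifications still pending: run again later */
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

With this ordering, a spurious work run exits before calling the
notifier, so the burst of ARP messages described above can no longer
happen.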