author	Eric Dumazet <edumazet@google.com>	2023-09-21 20:28:17 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-11-28 16:50:14 +0000
commit	73909810ac27f736698ad7f9823b71fadb99e824 (patch)
tree	9f1dc20aa0e180d5ec658177ebc458a7a84cb32f /include
parent	bd653b07095a6bedc5d98449e6becbc8c5f2876e (diff)
net: annotate data-races around sk->sk_tx_queue_mapping
[ Upstream commit 0bb4d124d34044179b42a769a0c76f389ae973b6 ]

This field can be read or written without socket lock being held.

Add annotations to avoid load-store tearing.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--	include/net/sock.h	20
1 file changed, 16 insertions, 4 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index f73ef7087a18..b021c8912e2c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1782,21 +1782,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
/* sk_tx_queue_mapping accept only upto a 16-bit value */
if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
return;
- sk->sk_tx_queue_mapping = tx_queue;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because socket lock might be not held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
}
#define NO_QUEUE_MAPPING USHRT_MAX
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because socket lock might be not held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_tx_queue_mapping;
+ if (sk) {
+ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+ * and sk_tx_queue_set().
+ */
+ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+ if (val != NO_QUEUE_MAPPING)
+ return val;
+ }
return -1;
}
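
As a rough illustration of the pattern this patch applies, here is a minimal, standalone C sketch. The toy_sock struct, the toy_tx_queue_*() helpers, and the simplified WRITE_ONCE()/READ_ONCE() macros are invented for this example; the kernel's real macros live in <linux/compiler.h> and do more. The sketch only mimics the pairing of one annotated store with one annotated load so the shared field is never accessed as a plain (tearable) load or store.

/*
 * Standalone sketch, not kernel code. Names and macro definitions below
 * are stand-ins for illustration only. Build with: cc -O2 -pthread sketch.c
 */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins: volatile-qualified single accesses, so the compiler
 * emits exactly one load/store and cannot tear, fuse or re-load the field.
 */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

#define NO_QUEUE_MAPPING USHRT_MAX

struct toy_sock {
	unsigned short tx_queue_mapping;	/* updated without a lock */
};

/* Writer side, mirroring sk_tx_queue_set()/sk_tx_queue_clear(). */
static void toy_tx_queue_set(struct toy_sock *tsk, int tx_queue)
{
	/* Paired with READ_ONCE() in toy_tx_queue_get(). */
	WRITE_ONCE(tsk->tx_queue_mapping, (unsigned short)tx_queue);
}

static void toy_tx_queue_clear(struct toy_sock *tsk)
{
	WRITE_ONCE(tsk->tx_queue_mapping, NO_QUEUE_MAPPING);
}

/* Reader side, mirroring sk_tx_queue_get(): load the field exactly once
 * into a local and test that copy, so a concurrent clear cannot slip in
 * between the check and the returned value.
 */
static int toy_tx_queue_get(const struct toy_sock *tsk)
{
	if (tsk) {
		int val = READ_ONCE(tsk->tx_queue_mapping);

		if (val != NO_QUEUE_MAPPING)
			return val;
	}
	return -1;
}

static struct toy_sock g_sock = { .tx_queue_mapping = NO_QUEUE_MAPPING };

static void *writer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		toy_tx_queue_set(&g_sock, i % 8);
		toy_tx_queue_clear(&g_sock);
	}
	return NULL;
}

static void *reader(void *arg)
{
	long hits = 0;

	(void)arg;
	for (int i = 0; i < 100000; i++)
		if (toy_tx_queue_get(&g_sock) >= 0)
			hits++;
	printf("reader saw a valid mapping %ld times\n", hits);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

The detail mirrored from the patch is in toy_tx_queue_get(): the mapping is read once and both the comparison and the return use that local copy, instead of reading the shared field twice as the old sk_tx_queue_get() did.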