/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Resilient Queued Spin Lock
*
* (C) Copyright 2024-2025 Meta Platforms, Inc. and affiliates.
*
* Authors: Kumar Kartikeya Dwivedi <memxor@gmail.com>
*/
#ifndef __ASM_GENERIC_RQSPINLOCK_H
#define __ASM_GENERIC_RQSPINLOCK_H

#include <linux/types.h>
#include <vdso/time64.h>
#include <linux/percpu.h>

struct qspinlock;
typedef struct qspinlock rqspinlock_t;

extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);

/*
* Default timeout for waiting loops is 0.25 seconds
*/
#define RES_DEF_TIMEOUT (NSEC_PER_SEC / 4)
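
/*
 * Illustrative only, not part of this header's API: a waiting loop could
 * derive its deadline from RES_DEF_TIMEOUT roughly as sketched below. The
 * try_acquire() helper is hypothetical; the real slowpath has its own
 * timeout machinery.
 *
 *	u64 deadline = ktime_get_mono_fast_ns() + RES_DEF_TIMEOUT;
 *
 *	while (!try_acquire(lock)) {
 *		if (ktime_get_mono_fast_ns() > deadline)
 *			return -ETIMEDOUT;
 *		cpu_relax();
 *	}
 */
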
/*
* Choose 31 as it makes rqspinlock_held cacheline-aligned.
*/
#define RES_NR_HELD 31

struct rqspinlock_held {
	int cnt;
	void *locks[RES_NR_HELD];
};

DECLARE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
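
/*
 * Back-of-the-envelope check (illustrative, assuming a typical 64-bit
 * configuration with 8-byte pointers and 64-byte cache lines): 4 bytes of
 * cnt, 4 bytes of padding, and 31 * 8 = 248 bytes of lock pointers give
 * sizeof(struct rqspinlock_held) == 256, i.e. exactly four cache lines,
 * which is why 31 is chosen above.
 */
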
static __always_inline void grab_held_lock_entry(void *lock)
{
	int cnt = this_cpu_inc_return(rqspinlock_held_locks.cnt);

	if (unlikely(cnt > RES_NR_HELD)) {
		/* Still keep the inc so we decrement later. */
		return;
	}

	/*
	 * Implied compiler barrier in per-CPU operations; otherwise we can have
	 * the compiler reorder inc with write to table, allowing interrupts to
	 * overwrite and erase our write to the table (as on interrupt exit it
	 * will be reset to NULL).
	 *
	 * It is fine for cnt inc to be reordered wrt remote readers though,
	 * they won't observe our entry until the cnt update is visible, that's
	 * all.
	 */
	this_cpu_write(rqspinlock_held_locks.locks[cnt - 1], lock);
}
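
/*
 * Illustrative pairing of the two helpers (acquire_or_timeout() is a
 * hypothetical stand-in, not part of this header): an acquisition attempt
 * records the lock in the per-CPU table, and unwinds the entry if the
 * attempt fails (e.g. timeout or deadlock); on success the entry stays put
 * until the unlock path clears it and then releases the lock word.
 *
 *	grab_held_lock_entry(lock);
 *	ret = acquire_or_timeout(lock);
 *	if (ret) {
 *		release_held_lock_entry();
 *		return ret;
 *	}
 *	// critical section; unlock clears the entry, then releases the lock
 */
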
/*
 * We simply don't support out-of-order unlocks, and keep the logic simple here.
 * The verifier prevents BPF programs from unlocking out-of-order, and the same
 * holds for in-kernel users.
 *
 * It is possible to run into misdetection scenarios of AA deadlocks on the same
 * CPU, and missed ABBA deadlocks on remote CPUs, if this function pops entries
 * out of order (due to a lock A, lock B, unlock A, unlock B pattern). The
 * correct logic to preserve the right entries in the table would be to walk the
 * array of held locks and swap and clear out-of-order entries, but that's too
 * complicated and we don't have a compelling use case for out-of-order
 * unlocking (see the illustrative ordering sketch after
 * release_held_lock_entry() below).
 */
static __always_inline void release_held_lock_entry(void)
{
	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);

	if (unlikely(rqh->cnt > RES_NR_HELD))
		goto dec;
	WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
dec:
	/*
	 * Reordering of clearing above with inc and its write in
	 * grab_held_lock_entry that came before us (in the same acquisition
	 * attempt) is ok, we either see a valid entry or NULL when it's
	 * visible.
	 *
	 * But this helper is invoked when we unwind upon failing to acquire the
	 * lock. Unlike the unlock path, which constitutes a release store after
	 * we clear the entry, we need to emit a write barrier here. Otherwise,
	 * we may have a situation as follows:
	 *
	 * <error> for lock B
	 * release_held_lock_entry
	 *
	 * try_cmpxchg_acquire for lock A
	 * grab_held_lock_entry
	 *
	 * Lack of any ordering means reordering may occur such that dec, inc
	 * are done before the entry is overwritten. This permits a remote lock
	 * holder of lock B (which this CPU failed to acquire) to now observe it
	 * as being attempted on this CPU, and may lead to misdetection (if this
	 * CPU holds a lock it is attempting to acquire, leading to a false ABBA
	 * diagnosis).
	 *
	 * In case of unlock, we will always do a release on the lock word after
	 * releasing the entry, ensuring that other CPUs cannot hold the lock
	 * (and make conclusions about deadlocks) until the entry has been
	 * cleared on the local CPU, preventing any anomalies. Reordering is
	 * still possible there, but a remote CPU cannot observe a lock in our
	 * table which it is already holding, since visibility entails our
	 * release store for the said lock has not retired.
	 *
	 * In theory we don't have a problem if the dec and WRITE_ONCE above get
	 * reordered with each other: we either notice an empty NULL entry on
	 * top (if dec follows WRITE_ONCE), or a potentially stale entry which
	 * cannot be observed (if dec precedes WRITE_ONCE).
	 *
	 * Emit the write barrier _before_ the dec; this permits dec-inc
	 * reordering, but that is harmless as we'd have the new entry set to
	 * NULL already, i.e. they cannot precede the NULL store above.
	 */
	smp_wmb();
	this_cpu_dec(rqspinlock_held_locks.cnt);
}
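
/*
 * Ordering sketch (illustrative; lock()/unlock() stand for whatever
 * acquire/release wrappers end up pairing with the helpers above):
 *
 *	lock(A); lock(B); unlock(B); unlock(A);	// strictly nested: supported
 *
 *	lock(A); lock(B); unlock(A); unlock(B);	// out-of-order: unsupported
 *
 * The second pattern pops the wrong entry off the top of the held-locks
 * table, which is exactly the misdetection scenario described before
 * release_held_lock_entry() above.
 */
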
#endif /* __ASM_GENERIC_RQSPINLOCK_H */