author		Siddharth Chintamaneni <sidchintamaneni@gmail.com>	2025-10-01 17:27:02 +0000
committer	Alexei Starovoitov <ast@kernel.org>	2025-10-07 15:30:43 -0700
commit		56b4d162392dda2365fbc1f482184a24b489d07d
tree		4e90e7eafde140d6a2d05ceba84fbebef9187748
parent		0db4941d9dae159d887e7e2eac7e54e60c3aac87
bpf: Cleanup unused func args in rqspinlock implementation
Clean up unused function arguments in the check_deadlock*() helpers: drop the
struct rqspinlock_timeout pointer from is_lock_released(), check_deadlock_AA(),
check_deadlock_ABBA() and check_deadlock(), and the unused mask argument from
check_deadlock_AA(), adjusting the callers accordingly.

Fixes: 31158ad02ddb ("rqspinlock: Add deadlock detection and recovery")
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20251001172702.122838-1-sidchintamaneni@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
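For reference, a minimal standalone sketch of the call chain as it looks after this
cleanup. Only the argument lists mirror the hunks below; the rqspinlock_t stand-in,
the u32 typedef, the placeholder bodies and main() are simplifications for
illustration, not the kernel's actual implementation:

/*
 * Hypothetical, simplified illustration only: real kernel types and the
 * per-CPU held-locks table are stubbed out, and the deadlock scans are
 * reduced to placeholders. Only the argument lists follow the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef unsigned int u32;
typedef struct { atomic_uint val; } rqspinlock_t;	/* stand-in for the kernel type */

static bool is_lock_released(rqspinlock_t *lock, u32 mask)
{
	/* The timeout state (ts) is no longer passed in; only the lock
	 * word and the mask of interest are needed. */
	return !(atomic_load_explicit(&lock->val, memory_order_acquire) & mask);
}

static int check_deadlock_AA(rqspinlock_t *lock)
{
	/* AA detection only needs the lock itself (real scan elided). */
	(void)lock;
	return 0;
}

static int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
{
	/* ABBA detection still consults the mask, e.g. to bail out early
	 * when the lock has been released (real scan elided). */
	if (is_lock_released(lock, mask))
		return 0;
	return 0;
}

static int check_deadlock(rqspinlock_t *lock, u32 mask)
{
	int ret = check_deadlock_AA(lock);

	if (ret)
		return ret;
	return check_deadlock_ABBA(lock, mask);
}

int main(void)
{
	rqspinlock_t lock = { .val = 0 };

	return check_deadlock(&lock, 0xffu);	/* 0: no deadlock detected */
}

The per-CPU bookkeeping and the AA/ABBA scans themselves are unchanged by the
patch; only the signatures shrink.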
-rw-r--r--	kernel/bpf/rqspinlock.c	19
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index a00561b1d3e5..21be48108e96 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -89,15 +89,14 @@ struct rqspinlock_timeout {
DEFINE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
EXPORT_SYMBOL_GPL(rqspinlock_held_locks);
-static bool is_lock_released(rqspinlock_t *lock, u32 mask, struct rqspinlock_timeout *ts)
+static bool is_lock_released(rqspinlock_t *lock, u32 mask)
{
if (!(atomic_read_acquire(&lock->val) & (mask)))
return true;
return false;
}
-static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
- struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_AA(rqspinlock_t *lock)
{
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
int cnt = min(RES_NR_HELD, rqh->cnt);
@@ -118,8 +117,7 @@ static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,
* more locks, which reduce to ABBA). This is not exhaustive, and we rely on
* timeouts as the final line of defense.
*/
-static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
- struct rqspinlock_timeout *ts)
+static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask)
{
struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
int rqh_cnt = min(RES_NR_HELD, rqh->cnt);
@@ -142,7 +140,7 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
* Let's ensure to break out of this loop if the lock is available for
* us to potentially acquire.
*/
- if (is_lock_released(lock, mask, ts))
+ if (is_lock_released(lock, mask))
return 0;
/*
@@ -198,15 +196,14 @@ static noinline int check_deadlock_ABBA(rqspinlock_t *lock, u32 mask,
return 0;
}
-static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,
- struct rqspinlock_timeout *ts)
+static noinline int check_deadlock(rqspinlock_t *lock, u32 mask)
{
int ret;
- ret = check_deadlock_AA(lock, mask, ts);
+ ret = check_deadlock_AA(lock);
if (ret)
return ret;
- ret = check_deadlock_ABBA(lock, mask, ts);
+ ret = check_deadlock_ABBA(lock, mask);
if (ret)
return ret;
@@ -234,7 +231,7 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
*/
if (prev + NSEC_PER_MSEC < time) {
ts->cur = time;
- return check_deadlock(lock, mask, ts);
+ return check_deadlock(lock, mask);
}
return 0;
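As the final hunk shows, check_timeout() re-runs the deadlock checks at most
roughly once per millisecond of spinning rather than on every poll. Below is a
minimal standalone sketch of that rate-limiting pattern; the clock and the
deadlock check are hypothetical stubs standing in for the kernel's helpers:

#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

/* Simplified stand-in for the kernel's struct rqspinlock_timeout. */
struct rqspinlock_timeout_stub {
	uint64_t cur;	/* timestamp (ns) of the last deadlock check */
};

/* Hypothetical stubs; the kernel uses its own clock source and the
 * check_deadlock() helper patched above. */
static uint64_t now_ns_stub(void)
{
	static uint64_t t;
	return t += 400 * 1000;	/* pretend 400us pass per poll */
}

static int check_deadlock_stub(void)
{
	return 0;	/* nonzero would mean a deadlock was suspected */
}

/* Re-run the (comparatively expensive) deadlock scan at most once per
 * millisecond of spinning; cheap polls in between return 0 immediately. */
static int maybe_check_deadlock(struct rqspinlock_timeout_stub *ts)
{
	uint64_t time = now_ns_stub();
	uint64_t prev = ts->cur;

	if (prev + NSEC_PER_MSEC < time) {
		ts->cur = time;	/* remember when we last scanned */
		return check_deadlock_stub();
	}
	return 0;
}

int main(void)
{
	struct rqspinlock_timeout_stub ts = { .cur = 0 };
	int ret = 0;

	for (int i = 0; i < 10 && !ret; i++)
		ret = maybe_check_deadlock(&ts);
	return ret;
}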