path: root/kernel
author	Davidlohr Bueso <dave@stgolabs.net>	2015-10-30 05:25:59 +0900
committer	Jens Axboe <axboe@fb.com>	2015-10-30 05:25:59 +0900
commit	cdea01b2bf98affb7e9c44530108a4a28535eee8 (patch)
tree	d76f70baf592052d4dff44e45bcde1ee29062570 /kernel
parent	cfd0c552a8272d691691f40073654d775836e23a (diff)
blktrace: re-write setting q->blk_trace
This is really about simplifying the double xchg patterns into a single cmpxchg, with the same logic. Other than the immediate cleanup, there are some subtleties this change deals with:

(i) While the load of the old bt is fully ordered wrt everything, ie:

	old_bt = xchg(&q->blk_trace, bt);		[barrier]
	if (old_bt)
		(void) xchg(&q->blk_trace, old_bt);	[barrier]

blk_trace could still be changed between the xchg and the old_bt load. Note that this race is merely theoretical and, afaict, the window is very small, but doing everything in a single context with cmpxchg closes it.

(ii) Ordering guarantees are obviously kept with cmpxchg.

(iii) Gets rid of the hacky-by-nature (void)xchg pattern.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
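For readers less familiar with the pattern being replaced, the standalone sketch below contrasts the double-xchg and single-cmpxchg forms. It uses C11 atomics as a stand-in for the kernel's xchg()/cmpxchg(); the struct, variable, and function names (bt_stub, q_blk_trace, setup_*) are illustrative only, not kernel API.

	/*
	 * Standalone sketch (not kernel code): C11 atomics stand in for the
	 * kernel's xchg()/cmpxchg(); all names here are illustrative.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	struct bt_stub { int id; };                    /* stand-in for struct blk_trace */
	static _Atomic(struct bt_stub *) q_blk_trace;  /* stand-in for q->blk_trace */

	/* Old pattern: double xchg. Between the first exchange and the restore,
	 * the slot briefly holds the new bt even though setup is about to fail. */
	static int setup_double_xchg(struct bt_stub *bt)
	{
		struct bt_stub *old_bt = atomic_exchange(&q_blk_trace, bt);

		if (old_bt) {
			(void)atomic_exchange(&q_blk_trace, old_bt);
			return -1;                     /* -EBUSY in the kernel */
		}
		return 0;
	}

	/* New pattern: one compare-and-swap installs bt only if the slot is
	 * still NULL, so there is no transient state to undo. */
	static int setup_cmpxchg(struct bt_stub *bt)
	{
		struct bt_stub *expected = NULL;

		return atomic_compare_exchange_strong(&q_blk_trace, &expected, bt) ? 0 : -1;
	}

	int main(void)
	{
		struct bt_stub a = { 1 }, b = { 2 };

		printf("cmpxchg setup:     %d\n", setup_cmpxchg(&a));      /* 0: installed */
		printf("double-xchg setup: %d\n", setup_double_xchg(&b));  /* -1: busy, old value restored */
		return 0;
	}

Either variant leaves the same final value in the slot; the cmpxchg form simply never exposes the losing bt to concurrent readers and drops the (void)xchg restore step.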
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/blktrace.c	| 16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 90e72a0c3047..e3a26188b95e 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -437,7 +437,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 		       struct block_device *bdev,
 		       struct blk_user_trace_setup *buts)
 {
-	struct blk_trace *old_bt, *bt = NULL;
+	struct blk_trace *bt = NULL;
 	struct dentry *dir = NULL;
 	int ret;
@@ -519,11 +519,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	bt->trace_state = Blktrace_setup;
 	ret = -EBUSY;
-	old_bt = xchg(&q->blk_trace, bt);
-	if (old_bt) {
-		(void) xchg(&q->blk_trace, old_bt);
+	if (cmpxchg(&q->blk_trace, NULL, bt))
 		goto err;
-	}
 	if (atomic_inc_return(&blk_probes_ref) == 1)
 		blk_register_tracepoints();
@@ -1481,7 +1478,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 static int blk_trace_setup_queue(struct request_queue *q,
 				 struct block_device *bdev)
 {
-	struct blk_trace *old_bt, *bt = NULL;
+	struct blk_trace *bt = NULL;
 	int ret = -ENOMEM;
 	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
@@ -1497,12 +1494,9 @@ static int blk_trace_setup_queue(struct request_queue *q,
 	blk_trace_setup_lba(bt, bdev);
-	old_bt = xchg(&q->blk_trace, bt);
-	if (old_bt != NULL) {
-		(void)xchg(&q->blk_trace, old_bt);
-		ret = -EBUSY;
+	ret = -EBUSY;
+	if (cmpxchg(&q->blk_trace, NULL, bt))
 		goto free_bt;
-	}
 	if (atomic_inc_return(&blk_probes_ref) == 1)
 		blk_register_tracepoints();