author	Christoph Hellwig <hch@lst.de>	2016-10-20 15:12:13 +0200
committer	Leonard Crestez <leonard.crestez@nxp.com>	2018-08-24 12:41:33 +0300
commit	584d17b95953aaf5754d152ca8a203abef922ec3 (patch)
tree	eac7c57dd4930d3f2d95ca4371a7dc9a6b1ea37b /drivers/md
parent	0d29a94140c40f1b8a8b371926bf45bddc5e49f1 (diff)
block: split out request-only flags into a new namespace
A lot of the REQ_* flags are only used on struct requests, and only of
use to the block layer and a few drivers that dig into struct request
internals.

This patch adds a new req_flags_t rq_flags field to struct request for
them, and thus dramatically shrinks the number of common request flags.
It also removes the unfortunate situation where we have to fit the
fields from the same enum into 32 bits for struct bio and 64 bits for
struct request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit e806402130c9c494e22c73ae9ead4e79d2a5811c)
Conflicts:
	drivers/mmc/core/block.c
	drivers/scsi/sd_zbc.c
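For orientation before reading the diff: the patch keeps the bio-visible
REQ_* op flags in rq->cmd_flags and moves the request-only flags into the
new rq->rq_flags field under an RQF_* prefix. Below is a minimal
user-space sketch of that split; the names mirror the patch
(req_flags_t, RQF_DONTPREP, RQF_FAILED, ...) but the bit values and the
cut-down struct are illustrative only, not the real <linux/blkdev.h>
definitions.

/*
 * Illustrative sketch only -- not the kernel's definitions.
 * Models the two flag namespaces this patch introduces.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;            /* kernel: __u32 __bitwise */

/* Request-only flags: live in rq->rq_flags, never on a struct bio. */
#define RQF_DONTPREP ((req_flags_t)(1u << 0))   /* bit values are made up */
#define RQF_FAILED   ((req_flags_t)(1u << 1))
#define RQF_IO_STAT  ((req_flags_t)(1u << 2))
#define RQF_ALLOCED  ((req_flags_t)(1u << 3))

/* Cut-down stand-in for struct request after the split. */
struct request {
	unsigned int cmd_flags;  /* op + flags shared with struct bio (REQ_*) */
	req_flags_t  rq_flags;   /* request-only flags (RQF_*), the new field */
};

int main(void)
{
	struct request rq = { 0 };

	rq.rq_flags |= RQF_DONTPREP;             /* was: rq->cmd_flags |= REQ_DONTPREP  */
	bool failed = rq.rq_flags & RQF_FAILED;  /* was: rq->cmd_flags & REQ_FAILED     */
	rq.rq_flags &= ~RQF_DONTPREP;            /* was: rq->cmd_flags &= ~REQ_DONTPREP */

	printf("failed=%d rq_flags=%#x\n", failed, (unsigned int)rq.rq_flags);
	return 0;
}

The dm-rq.c hunks that follow are exactly this mechanical substitution:
every request-only REQ_* test or update becomes the corresponding RQF_*
operation on rq_flags, while the shared op flags stay in cmd_flags.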
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-rq.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index ba7c4c685db3..03e8df440500 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -319,7 +319,7 @@ static void dm_unprep_request(struct request *rq)
if (!rq->q->mq_ops) {
rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
+ rq->rq_flags &= ~RQF_DONTPREP;
}
if (clone)
@@ -437,7 +437,7 @@ static void dm_softirq_done(struct request *rq)
return;
}
- if (rq->cmd_flags & REQ_FAILED)
+ if (rq->rq_flags & RQF_FAILED)
mapped = false;
dm_done(clone, tio->error, mapped);
@@ -466,7 +466,7 @@ static void dm_complete_request(struct request *rq, int error)
*/
static void dm_kill_unmapped_request(struct request *rq, int error)
{
- rq->cmd_flags |= REQ_FAILED;
+ rq->rq_flags |= RQF_FAILED;
dm_complete_request(rq, error);
}
@@ -482,7 +482,7 @@ static void end_clone_request(struct request *clone, int error)
* For just cleaning up the information of the queue in which
* the clone was dispatched.
* The clone is *NOT* freed actually here because it is alloced
- * from dm own mempool (REQ_ALLOCED isn't set).
+ * from dm own mempool (RQF_ALLOCED isn't set).
*/
__blk_put_request(clone->q, clone);
}
@@ -503,7 +503,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
int r;
if (blk_queue_io_stat(clone->q))
- clone->cmd_flags |= REQ_IO_STAT;
+ clone->rq_flags |= RQF_IO_STAT;
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
@@ -639,7 +639,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_DEFER;
rq->special = tio;
- rq->cmd_flags |= REQ_DONTPREP;
+ rq->rq_flags |= RQF_DONTPREP;
return BLKPREP_OK;
}