 block/bio.c                 |  6
 block/blk-mq-dma.c          |  1
 block/blk-mq.c              | 29
 block/blk-zoned.c           |  2
 drivers/block/ublk_drv.c    | 28
 drivers/md/bcache/request.c |  6
 6 files changed, 41 insertions(+), 31 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index fa5ff36b443f..e726c0e280a8 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -321,9 +321,13 @@ static struct bio *__bio_chain_endio(struct bio *bio)
return parent;
}
+/*
+ * bio_chain_endio() is used only as a sentinel value for ->bi_end_io and
+ * must never actually be called; reaching it indicates a programming error.
+ */
static void bio_chain_endio(struct bio *bio)
{
- bio_endio(__bio_chain_endio(bio));
+ BUG();
}
/**
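
The bio.c hunk turns bio_chain_endio() into a trap: it is installed as ->bi_end_io on chained bios purely so the completion path can recognize the chain and walk it itself, so the callback must never actually run. The sketch below is illustrative only, showing the sentinel comparison a completion path performs; it is not the real bio_endio() implementation, which also does accounting, tracing and zone handling.

/*
 * Illustrative sketch only: how a completion path recognizes the
 * bio_chain_endio sentinel instead of invoking it.
 */
static void sketch_bio_endio(struct bio *bio)
{
again:
        if (bio->bi_end_io == bio_chain_endio) {
                /* Walk the chain: finish this bio, continue with its parent. */
                bio = __bio_chain_endio(bio);
                goto again;
        }
        if (bio->bi_end_io)
                bio->bi_end_io(bio);
}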
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index b6dbc9767596..fb018fffffdc 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -199,6 +199,7 @@ static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev,
if (blk_can_dma_map_iova(req, dma_dev) &&
dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len))
return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
+ memset(state, 0, sizeof(*state));
return blk_dma_map_direct(req, dma_dev, iter, &vec);
}
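
The one-line blk-mq-dma.c change zeroes the dma_iova_state whenever the request falls back to direct mapping, so later checks do not see leftovers from an attempted IOVA allocation. A hedged sketch of a consumer, assuming the dma_use_iova()/dma_iova_destroy() helpers of the IOVA-based DMA mapping API and the rq_dma_dir() accessor; the function itself is illustrative, not the in-tree unmap helper:

/*
 * Hedged sketch (not the in-tree helper): an unmap path decides between
 * the two mapping strategies by inspecting the state.  A zeroed state
 * after the direct-map fallback keeps dma_use_iova() false here.
 */
static void sketch_rq_dma_unmap(struct request *req, struct device *dma_dev,
                                struct dma_iova_state *state, size_t mapped_len)
{
        if (dma_use_iova(state)) {
                /* One contiguous IOVA range was set up for the request. */
                dma_iova_destroy(dma_dev, state, mapped_len,
                                 rq_dma_dir(req), 0);
                return;
        }
        /* Otherwise each segment was mapped directly and must be unmapped
         * segment by segment (omitted in this sketch). */
}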
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bd8b11c472a2..1978eef95dca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -336,12 +336,12 @@ void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set)
{
struct request_queue *q;
- mutex_lock(&set->tag_list_lock);
- list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
if (!blk_queue_skip_tagset_quiesce(q))
blk_mq_quiesce_queue_nowait(q);
}
- mutex_unlock(&set->tag_list_lock);
+ rcu_read_unlock();
blk_mq_wait_quiesce_done(set);
}
@@ -351,12 +351,12 @@ void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set)
{
struct request_queue *q;
- mutex_lock(&set->tag_list_lock);
- list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
if (!blk_queue_skip_tagset_quiesce(q))
blk_mq_unquiesce_queue(q);
}
- mutex_unlock(&set->tag_list_lock);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset);
@@ -4311,7 +4311,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
struct blk_mq_tag_set *set = q->tag_set;
mutex_lock(&set->tag_list_lock);
- list_del(&q->tag_set_list);
+ list_del_rcu(&q->tag_set_list);
if (list_is_singular(&set->tag_list)) {
/* just transitioned to unshared */
set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
@@ -4319,7 +4319,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
blk_mq_update_tag_set_shared(set, false);
}
mutex_unlock(&set->tag_list_lock);
- INIT_LIST_HEAD(&q->tag_set_list);
}
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -4338,7 +4337,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
}
if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
queue_set_hctx_shared(q, true);
- list_add_tail(&q->tag_set_list, &set->tag_list);
+ list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
mutex_unlock(&set->tag_list_lock);
}
@@ -5193,27 +5192,19 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
struct io_comp_batch *iob, unsigned int flags)
{
- long state = get_current_state();
int ret;
do {
ret = q->mq_ops->poll(hctx, iob);
- if (ret > 0) {
- __set_current_state(TASK_RUNNING);
+ if (ret > 0)
return ret;
- }
-
- if (signal_pending_state(state, current))
- __set_current_state(TASK_RUNNING);
- if (task_is_running(current))
+ if (task_sigpending(current))
return 1;
-
if (ret < 0 || (flags & BLK_POLL_ONESHOT))
break;
cpu_relax();
} while (!need_resched());
- __set_current_state(TASK_RUNNING);
return 0;
}
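
The blk-mq.c hunks move tag-set quiesce/unquiesce from the tag_list mutex to RCU traversal: writers still serialize on tag_list_lock but use the _rcu list helpers, readers only take rcu_read_lock(), and the INIT_LIST_HEAD() after deletion is dropped because reinitializing an entry that concurrent RCU readers may still be walking is not safe. The blk_hctx_poll() hunk is independent: it stops saving and restoring the task state and simply returns when a signal is pending. Below is a generic sketch of the RCU-protected list pattern (hypothetical "item" list, not the blk-mq code):

/*
 * Generic sketch: writers serialize on a mutex and use the _rcu list
 * helpers; readers only need rcu_read_lock() to walk the list.
 */
struct item {
        struct list_head node;
};

static LIST_HEAD(item_list);
static DEFINE_MUTEX(item_lock);

static void item_add(struct item *it)
{
        mutex_lock(&item_lock);
        list_add_tail_rcu(&it->node, &item_list);   /* publish with a barrier */
        mutex_unlock(&item_lock);
}

static void item_del(struct item *it)
{
        mutex_lock(&item_lock);
        list_del_rcu(&it->node);    /* unlink; readers may still see the entry */
        mutex_unlock(&item_lock);
        synchronize_rcu();          /* wait out readers before freeing/reusing */
}

static void item_for_each(void (*fn)(struct item *))
{
        struct item *it;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &item_list, node)
                fn(it);
        rcu_read_unlock();
}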
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 394d8d74bba9..1c54678fae6b 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -2100,7 +2100,7 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
* we have a zone write plug for such zone if the device has a zone
* write plug hash table.
*/
- if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
+ if (!disk->zone_wplugs_hash)
return 0;
wp_offset = disk_zone_wplug_sync_wp_offset(disk, zone);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2c715df63f23..df9831783a13 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -926,6 +926,7 @@ static size_t ublk_copy_user_pages(const struct request *req,
size_t done = 0;
rq_for_each_segment(bv, req, iter) {
+ unsigned int len;
void *bv_buf;
size_t copied;
@@ -934,18 +935,17 @@ static size_t ublk_copy_user_pages(const struct request *req,
continue;
}
- bv.bv_offset += offset;
- bv.bv_len -= offset;
- bv_buf = bvec_kmap_local(&bv);
+ len = bv.bv_len - offset;
+ bv_buf = kmap_local_page(bv.bv_page) + bv.bv_offset + offset;
if (dir == ITER_DEST)
- copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+ copied = copy_to_iter(bv_buf, len, uiter);
else
- copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
+ copied = copy_from_iter(bv_buf, len, uiter);
kunmap_local(bv_buf);
done += copied;
- if (copied < bv.bv_len)
+ if (copied < len)
break;
offset = 0;
@@ -3673,6 +3673,19 @@ exit:
return ret;
}
+static bool ublk_ctrl_uring_cmd_may_sleep(u32 cmd_op)
+{
+ switch (_IOC_NR(cmd_op)) {
+ case UBLK_CMD_GET_QUEUE_AFFINITY:
+ case UBLK_CMD_GET_DEV_INFO:
+ case UBLK_CMD_GET_DEV_INFO2:
+ case _IOC_NR(UBLK_U_CMD_GET_FEATURES):
+ return false;
+ default:
+ return true;
+ }
+}
+
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -3681,7 +3694,8 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
u32 cmd_op = cmd->cmd_op;
int ret = -EINVAL;
- if (issue_flags & IO_URING_F_NONBLOCK)
+ if (ublk_ctrl_uring_cmd_may_sleep(cmd_op) &&
+ (issue_flags & IO_URING_F_NONBLOCK))
return -EAGAIN;
ublk_ctrl_cmd_dump(cmd);
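
Two independent ublk changes sit in this file: ublk_copy_user_pages() now maps the page with kmap_local_page() and applies the intra-segment offset explicitly instead of mutating the bio_vec handed out by rq_for_each_segment(), and control commands that cannot block are allowed to run under IO_URING_F_NONBLOCK while anything that may sleep still returns -EAGAIN for a blocking retry. A hedged sketch of the copy idiom in isolation (hypothetical helper, not the driver function):

/*
 * Hedged sketch (hypothetical helper, not ublk code): copy one bvec
 * segment to or from an iov_iter without modifying the bio_vec itself.
 */
static size_t sketch_copy_segment(const struct bio_vec *bv, unsigned int offset,
                                  struct iov_iter *uiter, int dir)
{
        unsigned int len = bv->bv_len - offset;
        void *p = kmap_local_page(bv->bv_page) + bv->bv_offset + offset;
        size_t copied;

        if (dir == ITER_DEST)
                copied = copy_to_iter(p, len, uiter);   /* device -> user buffer */
        else
                copied = copy_from_iter(p, len, uiter); /* user buffer -> device */
        kunmap_local(p);
        return copied;
}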
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index af345dc6fde1..82fdea7dea7a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1104,7 +1104,7 @@ static void detached_dev_end_io(struct bio *bio)
}
kfree(ddip);
- bio->bi_end_io(bio);
+ bio_endio(bio);
}
static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
@@ -1121,7 +1121,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
if (!ddip) {
bio->bi_status = BLK_STS_RESOURCE;
- bio->bi_end_io(bio);
+ bio_endio(bio);
return;
}
@@ -1136,7 +1136,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
if ((bio_op(bio) == REQ_OP_DISCARD) &&
!bdev_max_discard_sectors(dc->bdev))
- bio->bi_end_io(bio);
+ detached_dev_end_io(bio);
else
submit_bio_noacct(bio);
}
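
The bcache hunks are the other half of the bio.c change: completing a bio by calling ->bi_end_io() directly bypasses bio_endio()'s chain handling, accounting and tracing, and with the new BUG() trap it would crash on a chained bio, so the detached-device paths now complete through bio_endio() or the driver's own end_io. A toy sketch of the rule for driver code, assuming nothing beyond the core bio API:

/*
 * Toy sketch (not bcache code): drivers complete bios through bio_endio()
 * so that chained bios, accounting and tracing are handled centrally.
 */
static void sketch_complete_bio(struct bio *bio, blk_status_t status)
{
        bio->bi_status = status;
        bio_endio(bio);         /* never call bio->bi_end_io(bio) by hand */
}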