author     Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>  2021-07-20 15:04:13 +0000
committer  Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>  2021-07-20 15:04:13 +0000
commit     e9646ca70100b35fd85b597c3af7534c075e0e3a (patch)
tree       626c744c696fe142faa6599e1f99d3c433581cc1 /block
parent     d27b76752782fcf058f548e4ecb7d9f31eefe612 (diff)
parent     7c76bd6c36ed84c0e613ba0f3a1408a515b9f12d (diff)
Merge tag 'v5.4.132' into 5.4-2.3.x-imx
This is the 5.4.132 stable release

Conflicts (manual resolve):
- drivers/gpu/drm/rockchip/cdn-dp-core.c: Fix merge hiccup when integrating
  upstream commit 450c25b8a4c9c ("drm/rockchip: cdn-dp-core: add missing
  clk_disable_unprepare() on error in cdn_dp_grf_write()")
- drivers/perf/fsl_imx8_ddr_perf.c: Port upstream commit 3fea9b708ae37
  ("drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()")
  manually to NXP version.

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-merge.c    8
-rw-r--r--  block/blk-rq-qos.h  24
-rw-r--r--  block/blk-wbt.c     11
-rw-r--r--  block/blk-wbt.h      1
4 files changed, 39 insertions, 5 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 03959bfe961c..4b022f0c49d2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -571,10 +571,14 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
unsigned int nr_phys_segs)
{
- if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
+ if (blk_integrity_merge_bio(req->q, req, bio) == false)
goto no_merge;
- if (blk_integrity_merge_bio(req->q, req, bio) == false)
+ /* discard request merge won't add new segment */
+ if (req_op(req) == REQ_OP_DISCARD)
+ return 1;
+
+ if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
goto no_merge;
/*
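For readers following the control-flow change above: the hunk reorders the checks in ll_new_hw_segment() so the integrity check runs first and a discard merge is accepted before the phys-segment limit is consulted. Below is a minimal user-space sketch of that ordering, not kernel code; every name in it (fake_request, model_new_hw_segment, MAX_SEGMENTS, OP_DISCARD) is invented for illustration.

/*
 * Simplified model of the reordered checks: integrity first, then an early
 * "accept" for discard merges, and only afterwards the segment-count limit
 * that a discard merge would otherwise trip over.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SEGMENTS 4

enum fake_op { OP_READ, OP_WRITE, OP_DISCARD };

struct fake_request {
	enum fake_op op;
	unsigned int nr_phys_segments;
};

static int model_new_hw_segment(struct fake_request *req,
				bool integrity_ok,
				unsigned int nr_phys_segs)
{
	if (!integrity_ok)
		return 0;			/* no_merge */

	/* discard request merge won't add new segments */
	if (req->op == OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > MAX_SEGMENTS)
		return 0;			/* no_merge */

	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

int main(void)
{
	struct fake_request discard = { .op = OP_DISCARD, .nr_phys_segments = 4 };
	struct fake_request write   = { .op = OP_WRITE,   .nr_phys_segments = 4 };

	/* The discard merge succeeds even though the segment count is "full". */
	printf("discard merge: %d\n", model_new_hw_segment(&discard, true, 1));
	printf("write merge:   %d\n", model_new_hw_segment(&write, true, 1));
	return 0;
}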
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 2bc43e94f4c4..2bcb3495e376 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -7,6 +7,7 @@
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
+#include <linux/blk-mq.h>
#include "blk-mq-debugfs.h"
@@ -99,8 +100,21 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
+ /*
+ * No IO can be in-flight when adding rqos, so freeze queue, which
+ * is fine since we only support rq_qos for blk-mq queue.
+ *
+ * Reuse ->queue_lock for protecting against other concurrent
+ * rq_qos adding/deleting
+ */
+ blk_mq_freeze_queue(q);
+
+ spin_lock_irq(&q->queue_lock);
rqos->next = q->rq_qos;
q->rq_qos = rqos;
+ spin_unlock_irq(&q->queue_lock);
+
+ blk_mq_unfreeze_queue(q);
if (rqos->ops->debugfs_attrs)
blk_mq_debugfs_register_rqos(rqos);
@@ -110,12 +124,22 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
struct rq_qos **cur;
+ /*
+ * See comment in rq_qos_add() about freezing queue & using
+ * ->queue_lock.
+ */
+ blk_mq_freeze_queue(q);
+
+ spin_lock_irq(&q->queue_lock);
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
if (*cur == rqos) {
*cur = rqos->next;
break;
}
}
+ spin_unlock_irq(&q->queue_lock);
+
+ blk_mq_unfreeze_queue(q);
blk_mq_debugfs_unregister_rqos(rqos);
}
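The two hunks above serialize rq_qos_add()/rq_qos_del() by freezing the queue and taking ->queue_lock around the singly linked rq_qos list. The sketch below is a stand-alone user-space model of that pattern, with a pthread mutex standing in for ->queue_lock (queue freezing has no user-space analogue here); struct policy, policy_lock and the policy names are invented, and only the pointer-to-pointer unlink idiom mirrors the kernel code.

#include <pthread.h>
#include <stdio.h>

struct policy {
	const char *name;
	struct policy *next;
};

static struct policy *policies;			/* plays the role of q->rq_qos */
static pthread_mutex_t policy_lock = PTHREAD_MUTEX_INITIALIZER;

static void policy_add(struct policy *p)
{
	pthread_mutex_lock(&policy_lock);	/* stands in for ->queue_lock */
	p->next = policies;
	policies = p;
	pthread_mutex_unlock(&policy_lock);
}

static void policy_del(struct policy *p)
{
	struct policy **cur;

	pthread_mutex_lock(&policy_lock);
	for (cur = &policies; *cur; cur = &(*cur)->next) {
		if (*cur == p) {
			*cur = p->next;		/* unlink without a prev pointer */
			break;
		}
	}
	pthread_mutex_unlock(&policy_lock);
}

int main(void)
{
	struct policy a = { .name = "wbt" }, b = { .name = "iolat" };

	policy_add(&a);
	policy_add(&b);
	policy_del(&a);

	for (struct policy *p = policies; p; p = p->next)
		printf("%s\n", p->name);	/* prints: iolat */
	return 0;
}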
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8641ba9793c5..ee708c1bc352 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -77,7 +77,8 @@ enum {
static inline bool rwb_enabled(struct rq_wb *rwb)
{
- return rwb && rwb->wb_normal != 0;
+ return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
+ rwb->wb_normal != 0;
}
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
@@ -644,9 +645,13 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
void wbt_enable_default(struct request_queue *q)
{
struct rq_qos *rqos = wbt_rq_qos(q);
+
/* Throttling already enabled? */
- if (rqos)
+ if (rqos) {
+ if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
+ RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
return;
+ }
/* Queue not registered? Maybe shutting down... */
if (!blk_queue_registered(q))
@@ -710,7 +715,7 @@ void wbt_disable_default(struct request_queue *q)
rwb = RQWB(rqos);
if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
blk_stat_deactivate(rwb->cb);
- rwb->wb_normal = 0;
+ rwb->enable_state = WBT_STATE_OFF_DEFAULT;
}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);
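The blk-wbt.c hunks change wbt_disable_default() to record WBT_STATE_OFF_DEFAULT instead of zeroing wb_normal, so a later wbt_enable_default() can flip the state back without losing the computed limits. The following stand-alone sketch models only that state machine; struct fake_wb and the helper names are invented, and just the three WBT_STATE_* values mirror blk-wbt.h.

#include <stdbool.h>
#include <stdio.h>

enum {
	WBT_STATE_ON_DEFAULT = 1,
	WBT_STATE_ON_MANUAL = 2,
	WBT_STATE_OFF_DEFAULT
};

struct fake_wb {
	int enable_state;
	unsigned int wb_normal;		/* throttling limit, left intact on disable */
};

static bool wb_enabled(const struct fake_wb *wb)
{
	return wb->enable_state != WBT_STATE_OFF_DEFAULT && wb->wb_normal != 0;
}

static void disable_default(struct fake_wb *wb)
{
	/* only a by-default enablement is switched off, as in the patch */
	if (wb->enable_state == WBT_STATE_ON_DEFAULT)
		wb->enable_state = WBT_STATE_OFF_DEFAULT;
}

static void enable_default(struct fake_wb *wb)
{
	if (wb->enable_state == WBT_STATE_OFF_DEFAULT)
		wb->enable_state = WBT_STATE_ON_DEFAULT;
}

int main(void)
{
	struct fake_wb wb = { .enable_state = WBT_STATE_ON_DEFAULT, .wb_normal = 8 };

	disable_default(&wb);
	printf("after disable:   enabled=%d wb_normal=%u\n", wb_enabled(&wb), wb.wb_normal);
	enable_default(&wb);
	printf("after re-enable: enabled=%d wb_normal=%u\n", wb_enabled(&wb), wb.wb_normal);
	return 0;
}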
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 8e4e37660971..d8d9f41b42f9 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -34,6 +34,7 @@ enum {
enum {
WBT_STATE_ON_DEFAULT = 1,
WBT_STATE_ON_MANUAL = 2,
+ WBT_STATE_OFF_DEFAULT
};
struct rq_wb {