author | Marcel Ziswiler <marcel.ziswiler@toradex.com> | 2020-02-04 11:31:11 +0100
committer | Marcel Ziswiler <marcel.ziswiler@toradex.com> | 2020-02-07 13:50:20 +0100
commit | 500f9a04cd76f504285ad99b15e390e20ab17e0c (patch)
tree | e1c5ff3a78e75f0847b47fbac63d279dd411e7e5 /block
parent | 0f549d8c4d5edd68a2a492afd48228458d3bca4a (diff)
parent | 6d0c334a400db31751c787c411e7187ab59a3f1d (diff)
Merge tag 'v4.14.164' into 4.14-2.3.x-imx
This is the 4.14.164 stable release
Conflicts:
arch/arm/Kconfig.debug
arch/arm/boot/dts/imx7s.dtsi
arch/arm/mach-imx/cpuidle-imx6q.c
arch/arm/mach-imx/cpuidle-imx6sx.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kvm/hyp/tlb.c
drivers/crypto/caam/caamalg.c
drivers/crypto/mxs-dcp.c
drivers/dma/imx-sdma.c
drivers/gpio/gpio-vf610.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/input/keyboard/imx_keypad.c
drivers/input/keyboard/snvs_pwrkey.c
drivers/mmc/core/block.c
drivers/mmc/core/queue.h
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/net/can/flexcan.c
drivers/net/can/rx-offload.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/pci/dwc/pci-imx6.c
drivers/spi/spi-fsl-lpspi.c
drivers/usb/dwc3/gadget.c
include/net/tcp.h
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl_esai.c
Diffstat (limited to 'block')
-rw-r--r-- | block/bfq-iosched.c | 13
-rw-r--r-- | block/bio-integrity.c | 8
-rw-r--r-- | block/bio.c | 136
-rw-r--r-- | block/blk-core.c | 1
-rw-r--r-- | block/blk-flush.c | 2
-rw-r--r-- | block/blk-map.c | 2
-rw-r--r-- | block/blk-merge.c | 48
-rw-r--r-- | block/blk-mq-sysfs.c | 15
-rw-r--r-- | block/blk-sysfs.c | 3
-rw-r--r-- | block/compat_ioctl.c | 11
-rw-r--r-- | block/sed-opal.c | 9
11 files changed, 185 insertions, 63 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 3b44bd28fc45..93863c6173e6 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2282,6 +2282,8 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
     if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
         bfq_symmetric_scenario(bfqd))
         sl = min_t(u64, sl, BFQ_MIN_TT);
+    else if (bfqq->wr_coeff > 1)
+        sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
 
     bfqd->last_idling_start = ktime_get();
     hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
@@ -3312,7 +3314,12 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
      * whether bfqq is being weight-raised, because
      * bfq_symmetric_scenario() does not take into account also
      * weight-raised queues (see comments on
-     * bfq_weights_tree_add()).
+     * bfq_weights_tree_add()). In particular, if bfqq is being
+     * weight-raised, it is important to idle only if there are
+     * other, non-weight-raised queues that may steal throughput
+     * to bfqq. Actually, we should be even more precise, and
+     * differentiate between interactive weight raising and
+     * soft real-time weight raising.
      *
      * As a side note, it is worth considering that the above
      * device-idling countermeasures may however fail in the
@@ -3324,7 +3331,8 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
      * to let requests be served in the desired order until all
      * the requests already queued in the device have been served.
      */
-    asymmetric_scenario = bfqq->wr_coeff > 1 ||
+    asymmetric_scenario = (bfqq->wr_coeff > 1 &&
+                           bfqd->wr_busy_queues < bfqd->busy_queues) ||
         !bfq_symmetric_scenario(bfqd);
 
     /*
@@ -3758,6 +3766,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
         unsigned long flags;
 
         spin_lock_irqsave(&bfqd->lock, flags);
+        bfqq->bic = NULL;
         bfq_exit_bfqq(bfqd, bfqq);
         bic_set_bfqq(bic, NULL, is_sync);
         spin_unlock_irqrestore(&bfqd->lock, flags);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5df32907ff3b..7f8010662437 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -313,8 +313,12 @@ bool bio_integrity_prep(struct bio *bio)
         ret = bio_integrity_add_page(bio, virt_to_page(buf),
                                      bytes, offset);
 
-        if (ret == 0)
-            return false;
+        if (ret == 0) {
+            printk(KERN_ERR "could not attach integrity payload\n");
+            kfree(buf);
+            status = BLK_STS_RESOURCE;
+            goto err_end_io;
+        }
 
         if (ret < bytes)
             break;
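The bio-integrity hunk above is a leak fix: the old code bailed out with a bare `return false` when `bio_integrity_add_page()` attached nothing, leaking the freshly allocated `buf` and never completing the bio. The fix logs the failure, frees the buffer, records a status, and routes through the common error exit. A minimal userspace sketch of that centralised-cleanup pattern follows; the function names and the -12 status are stand-ins, not the kernel API:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for bio_integrity_add_page(): bytes attached, 0 on failure. */
static unsigned int add_page(unsigned int bytes)
{
    return bytes <= 4096 ? bytes : 0;
}

static int prep_payload(unsigned int bytes)
{
    int status = 0;
    char *buf = malloc(bytes);

    if (!buf)
        return -1;

    if (add_page(bytes) == 0) {
        fprintf(stderr, "could not attach integrity payload\n");
        free(buf);      /* the pre-fix code returned without this */
        status = -12;   /* stand-in for BLK_STS_RESOURCE */
        goto err_end_io;
    }

    free(buf);          /* success path; the real code hands buf to the bio */
    return 0;

err_end_io:
    /* single shared exit that completes the request with 'status' */
    return status;
}

int main(void)
{
    printf("small: %d, huge: %d\n", prep_payload(512), prep_payload(65536));
    return 0;
}
```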
diff --git a/block/bio.c b/block/bio.c
index 2e5d881423b8..1384f9790882 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -773,7 +773,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
             return 0;
     }
 
-    if (bio->bi_vcnt >= bio->bi_max_vecs)
+    if (bio_full(bio))
         return 0;
 
     /*
@@ -821,65 +821,97 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
 EXPORT_SYMBOL(bio_add_pc_page);
 
 /**
- * bio_add_page - attempt to add page to bio
- * @bio: destination bio
- * @page: page to add
- * @len: vec entry length
- * @offset: vec entry offset
+ * __bio_try_merge_page - try appending data to an existing bvec.
+ * @bio: destination bio
+ * @page: page to add
+ * @len: length of the data to add
+ * @off: offset of the data in @page
  *
- * Attempt to add a page to the bio_vec maplist. This will only fail
- * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
+ * Try to add the data at @page + @off to the last bvec of @bio. This is a
+ * a useful optimisation for file systems with a block size smaller than the
+ * page size.
+ *
+ * Return %true on success or %false on failure.
  */
-int bio_add_page(struct bio *bio, struct page *page,
-         unsigned int len, unsigned int offset)
+bool __bio_try_merge_page(struct bio *bio, struct page *page,
+        unsigned int len, unsigned int off)
 {
-    struct bio_vec *bv;
-
-    /*
-     * cloned bio must not modify vec list
-     */
     if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
-        return 0;
+        return false;
 
-    /*
-     * For filesystems with a blocksize smaller than the pagesize
-     * we will often be called with the same page as last time and
-     * a consecutive offset. Optimize this special case.
-     */
     if (bio->bi_vcnt > 0) {
-        bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+        struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
-        if (page == bv->bv_page &&
-            offset == bv->bv_offset + bv->bv_len) {
+        if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
             bv->bv_len += len;
-            goto done;
+            bio->bi_iter.bi_size += len;
+            return true;
         }
     }
+    return false;
+}
+EXPORT_SYMBOL_GPL(__bio_try_merge_page);
 
-    if (bio->bi_vcnt >= bio->bi_max_vecs)
-        return 0;
+/**
+ * __bio_add_page - add page to a bio in a new segment
+ * @bio: destination bio
+ * @page: page to add
+ * @len: length of the data to add
+ * @off: offset of the data in @page
+ *
+ * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
+ * that @bio has space for another bvec.
+ */
+void __bio_add_page(struct bio *bio, struct page *page,
+        unsigned int len, unsigned int off)
+{
+    struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
 
-    bv = &bio->bi_io_vec[bio->bi_vcnt];
-    bv->bv_page = page;
-    bv->bv_len = len;
-    bv->bv_offset = offset;
+    WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+    WARN_ON_ONCE(bio_full(bio));
+
+    bv->bv_page = page;
+    bv->bv_offset = off;
+    bv->bv_len = len;
 
-    bio->bi_vcnt++;
-done:
     bio->bi_iter.bi_size += len;
+    bio->bi_vcnt++;
+}
+EXPORT_SYMBOL_GPL(__bio_add_page);
+
+/**
+ * bio_add_page - attempt to add page to bio
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
+ *
+ * Attempt to add a page to the bio_vec maplist. This will only fail
+ * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
+ */
+int bio_add_page(struct bio *bio, struct page *page,
+         unsigned int len, unsigned int offset)
+{
+    if (!__bio_try_merge_page(bio, page, len, offset)) {
+        if (bio_full(bio))
+            return 0;
+        __bio_add_page(bio, page, len, offset);
+    }
     return len;
 }
 EXPORT_SYMBOL(bio_add_page);
 
 /**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
  * @bio: bio to add pages to
  * @iter: iov iterator describing the region to be mapped
  *
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
  * pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from the
+ * the next non-empty segment of the iov iterator.
  */
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
     unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
     struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
@@ -916,6 +948,33 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
     iov_iter_advance(iter, size);
     return 0;
 }
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whatever is smaller.
+ * If MM encounters an error pinning the requested pages, it stops.
+ * Error is returned only if 0 pages could be pinned.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+    unsigned short orig_vcnt = bio->bi_vcnt;
+
+    do {
+        int ret = __bio_iov_iter_get_pages(bio, iter);
+
+        if (unlikely(ret))
+            return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+    } while (iov_iter_count(iter) && !bio_full(bio));
+
+    return 0;
+}
 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
 
 struct submit_bio_ret {
@@ -1280,8 +1339,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
             }
         }
 
-        if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+        if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+            if (!map_data)
+                __free_page(page);
             break;
+        }
 
         len -= bytes;
         offset = 0;
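The bio.c rework above factors `bio_add_page()` into two building blocks: `__bio_try_merge_page()`, which extends the last bvec when the new data is physically contiguous with it, and `__bio_add_page()`, which starts a new bvec when there is room. A self-contained sketch of that merge-or-append idea, using a hypothetical fixed-size segment vector in place of `struct bio`:

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_SEGS 4

struct seg {
    const char *base;
    unsigned int len;
};

struct segvec {
    struct seg segs[MAX_SEGS];
    unsigned int cnt;
};

/* Analogue of __bio_try_merge_page(): extend the last segment if the new
 * data starts exactly where the previous segment ends. */
static bool try_merge(struct segvec *v, const char *base, unsigned int len)
{
    if (v->cnt > 0) {
        struct seg *s = &v->segs[v->cnt - 1];

        if (base == s->base + s->len) {
            s->len += len;
            return true;
        }
    }
    return false;
}

/* Analogue of bio_full(). */
static bool vec_full(const struct segvec *v)
{
    return v->cnt >= MAX_SEGS;
}

/* Analogue of the reworked bio_add_page(): merge first, append second. */
static unsigned int add_seg(struct segvec *v, const char *base, unsigned int len)
{
    if (!try_merge(v, base, len)) {
        if (vec_full(v))
            return 0;   /* caller must start a new vector */
        v->segs[v->cnt].base = base;
        v->segs[v->cnt].len = len;
        v->cnt++;
    }
    return len;
}

int main(void)
{
    static const char buf[8192];
    struct segvec v = { .cnt = 0 };

    add_seg(&v, buf, 512);
    add_seg(&v, buf + 512, 512);    /* contiguous: merges */
    add_seg(&v, buf + 4096, 512);   /* gap: new segment */
    printf("segments=%u first_len=%u\n", v.cnt, v.segs[0].len);
    return 0;
}
```

Splitting the helper this way also lets a caller that already knows the bio has room skip straight to the cheap append, which is what the kerneldoc's "caller must ensure that @bio has space" is about.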
diff --git a/block/blk-core.c b/block/blk-core.c
index b6e3c4254795..57d32555f191 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -339,7 +339,6 @@ void blk_sync_queue(struct request_queue *q)
         struct blk_mq_hw_ctx *hctx;
         int i;
 
-        cancel_delayed_work_sync(&q->requeue_work);
         queue_for_each_hw_ctx(q, hctx, i)
             cancel_delayed_work_sync(&hctx->run_work);
     } else {
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 4938bec8cfef..6603352879e7 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -402,7 +402,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
     blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
     spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
-    blk_mq_run_hw_queue(hctx, true);
+    blk_mq_sched_restart(hctx);
 }
 
 /**
diff --git a/block/blk-map.c b/block/blk-map.c
index dcfb489a0c8d..53a7b3e4312e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -152,7 +152,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
     return 0;
 
 unmap_rq:
-    __blk_rq_unmap_user(bio);
+    blk_rq_unmap_user(bio);
 fail:
     rq->bio = NULL;
     return ret;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8d60a5bbcef9..f61b50a01bc7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -659,6 +659,31 @@ static void blk_account_io_merge(struct request *req)
         part_stat_unlock();
     }
 }
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and send them to controller together. The ranges
+ * needn't to be contiguous.
+ * Otherwise, the bios/requests will be handled as same as
+ * others which should be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+    if (req_op(req) == REQ_OP_DISCARD &&
+        queue_max_discard_segments(req->q) > 1)
+        return true;
+    return false;
+}
+
+enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+{
+    if (blk_discard_mergable(req))
+        return ELEVATOR_DISCARD_MERGE;
+    else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+        return ELEVATOR_BACK_MERGE;
+
+    return ELEVATOR_NO_MERGE;
+}
 
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
@@ -676,12 +701,6 @@ static struct request *attempt_merge(struct request_queue *q,
     if (req_op(req) != req_op(next))
         return NULL;
 
-    /*
-     * not contiguous
-     */
-    if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-        return NULL;
-
     if (rq_data_dir(req) != rq_data_dir(next)
         || req->rq_disk != next->rq_disk
         || req_no_special_merge(next))
@@ -705,11 +724,19 @@ static struct request *attempt_merge(struct request_queue *q,
      * counts here. Handle DISCARDs separately, as they
      * have separate settings.
      */
-    if (req_op(req) == REQ_OP_DISCARD) {
+
+    switch (blk_try_req_merge(req, next)) {
+    case ELEVATOR_DISCARD_MERGE:
         if (!req_attempt_discard_merge(q, req, next))
             return NULL;
-    } else if (!ll_merge_requests_fn(q, req, next))
+        break;
+    case ELEVATOR_BACK_MERGE:
+        if (!ll_merge_requests_fn(q, req, next))
+            return NULL;
+        break;
+    default:
         return NULL;
+    }
 
     /*
      * If failfast settings disagree or any of the two is already
@@ -738,7 +765,7 @@ static struct request *attempt_merge(struct request_queue *q,
 
     req->__data_len += blk_rq_bytes(next);
 
-    if (req_op(req) != REQ_OP_DISCARD)
+    if (!blk_discard_mergable(req))
         elv_merge_requests(q, req, next);
 
     /*
@@ -834,8 +861,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-    if (req_op(rq) == REQ_OP_DISCARD &&
-        queue_max_discard_segments(rq->q) > 1)
+    if (blk_discard_mergable(rq))
         return ELEVATOR_DISCARD_MERGE;
     else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
         return ELEVATOR_BACK_MERGE;
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index a54b4b070f1c..de209d60a662 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -145,20 +145,25 @@ static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
 
 static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
 {
+    const size_t size = PAGE_SIZE - 1;
     unsigned int i, first = 1;
-    ssize_t ret = 0;
+    int ret = 0, pos = 0;
 
     for_each_cpu(i, hctx->cpumask) {
         if (first)
-            ret += sprintf(ret + page, "%u", i);
+            ret = snprintf(pos + page, size - pos, "%u", i);
         else
-            ret += sprintf(ret + page, ", %u", i);
+            ret = snprintf(pos + page, size - pos, ", %u", i);
+
+        if (ret >= size - pos)
+            break;
 
         first = 0;
+        pos += ret;
     }
 
-    ret += sprintf(ret + page, "\n");
-    return ret;
+    ret = snprintf(pos + page, size + 1 - pos, "\n");
+    return pos + ret;
 }
 
 static struct attribute *default_ctx_attrs[] = {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index d0ec0d55d3d0..dfc391df0ffe 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -811,6 +811,9 @@ static void __blk_release_queue(struct work_struct *work)
 
     blk_free_queue_stats(q->stats);
 
+    if (q->mq_ops)
+        cancel_delayed_work_sync(&q->requeue_work);
+
     blk_exit_rl(q, &q->root_rl);
 
     if (q->queue_tags)
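The `blk_mq_hw_sysfs_cpus_show()` hunk above replaces unbounded `sprintf()` calls into a sysfs page with `snprintf()` plus a running position and a remaining-space check, so a machine with enough CPUs can no longer overrun the `PAGE_SIZE` buffer. A compilable userspace sketch of the same bounded-append loop, with the buffer shrunk so the truncation is visible:

```c
#include <stdio.h>

#define PAGE_SZ 32      /* deliberately tiny so the truncation shows */

int main(void)
{
    char page[PAGE_SZ];
    const size_t size = sizeof(page) - 1;   /* keep room for '\n' */
    int ret = 0, pos = 0, first = 1;

    for (int cpu = 0; cpu < 16; cpu++) {
        if (first)
            ret = snprintf(page + pos, size - pos, "%d", cpu);
        else
            ret = snprintf(page + pos, size - pos, ", %d", cpu);

        /* snprintf() returns what it wanted to write; if that no
         * longer fits, stop instead of running past the buffer */
        if (ret >= (int)(size - pos))
            break;

        first = 0;
        pos += ret;
    }

    ret = snprintf(page + pos, size + 1 - pos, "\n");
    fputs(page, stdout);
    return 0;
}
```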
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 6ca015f92766..6490b2759bcb 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -6,6 +6,7 @@
 #include <linux/compat.h>
 #include <linux/elevator.h>
 #include <linux/hdreg.h>
+#include <linux/pr.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/types.h>
@@ -354,6 +355,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
      * but we call blkdev_ioctl, which gets the lock for us
      */
     case BLKRRPART:
+    case BLKREPORTZONE:
+    case BLKRESETZONE:
         return blkdev_ioctl(bdev, mode, cmd,
                 (unsigned long)compat_ptr(arg));
     case BLKBSZSET_32:
@@ -401,6 +404,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
     case BLKTRACETEARDOWN: /* compatible */
         ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
         return ret;
+    case IOC_PR_REGISTER:
+    case IOC_PR_RESERVE:
+    case IOC_PR_RELEASE:
+    case IOC_PR_PREEMPT:
+    case IOC_PR_PREEMPT_ABORT:
+    case IOC_PR_CLEAR:
+        return blkdev_ioctl(bdev, mode, cmd,
+                (unsigned long)compat_ptr(arg));
     default:
         if (disk->fops->compat_ioctl)
             ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 4f5e70d4abc3..c64011cda9fc 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -2078,13 +2078,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
 static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
                                           struct opal_mbr_data *opal_mbr)
 {
+    u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
+        OPAL_TRUE : OPAL_FALSE;
+
     const struct opal_step mbr_steps[] = {
         { opal_discovery0, },
         { start_admin1LSP_opal_session, &opal_mbr->key },
-        { set_mbr_done, &opal_mbr->enable_disable },
+        { set_mbr_done, &enable_disable },
         { end_opal_session, },
         { start_admin1LSP_opal_session, &opal_mbr->key },
-        { set_mbr_enable_disable, &opal_mbr->enable_disable },
+        { set_mbr_enable_disable, &enable_disable },
         { end_opal_session, },
         { NULL, }
     };
@@ -2204,7 +2207,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
 
 static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
 {
-    u8 mbr_done_tf = 1;
+    u8 mbr_done_tf = OPAL_TRUE;
     const struct opal_step mbrdone_step [] = {
         { opal_discovery0, },
         { start_admin1LSP_opal_session, key },
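The sed-opal hunks above stop passing `opal_mbr->enable_disable` straight into the step table and instead normalise it into a local `OPAL_TRUE`/`OPAL_FALSE` token first, since the `OPAL_MBR_ENABLE`/`OPAL_MBR_DISABLE` encoding does not line up with the boolean token the session layer sends to the device. A rough sketch of that normalise-then-point pattern; the enum values here are illustrative rather than the kernel's uapi definitions:

```c
#include <stdio.h>

/* Illustrative values only: the point is that the two encodings differ,
 * so the raw user-supplied field must not be handed to the device. */
enum opal_token { OPAL_FALSE = 0x00, OPAL_TRUE = 0x01 };
enum opal_mbr { OPAL_MBR_ENABLE = 0x00, OPAL_MBR_DISABLE = 0x01 };

struct opal_mbr_data {
    unsigned char enable_disable;   /* carries an enum opal_mbr value */
};

/* Stand-in for the set_mbr_enable_disable step: expects a token pointer. */
static void set_mbr_enable_disable(const unsigned char *tok)
{
    printf("token sent to device: 0x%02x\n", *tok);
}

static void opal_enable_disable_shadow_mbr(const struct opal_mbr_data *mbr)
{
    /* normalise once, up front, and point the step at the local copy */
    unsigned char enable_disable =
        mbr->enable_disable == OPAL_MBR_ENABLE ? OPAL_TRUE : OPAL_FALSE;

    set_mbr_enable_disable(&enable_disable);
}

int main(void)
{
    struct opal_mbr_data mbr = { .enable_disable = OPAL_MBR_ENABLE };

    opal_enable_disable_shadow_mbr(&mbr);   /* prints 0x01, not 0x00 */
    return 0;
}
```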