author    Linus Torvalds <torvalds@linux-foundation.org>  2025-10-10 10:37:13 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2025-10-10 10:37:13 -0700
commit    1b1391b9c4bfadcaeb89a87edf6c3520dd349e35 (patch)
tree      dd540f1727e470ae457a2a18c6cc095e3177f0ec
parent    eba41c0173c8c27702b720730ed9d399088409f0 (diff)
parent    455281c0ef4e2cabdfe2e8b83fa6010d5210811c (diff)
Merge tag 'block-6.18-20251009' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:

 - Don't include __GFP_NOWARN for the loop worker allocation, as it
   already uses GFP_NOWAIT, which has __GFP_NOWARN set

 - Small series cleaning up the recent bio_iov_iter_get_pages() changes

 - loop fix for leaking the backing file reference if validation fails

 - Update of a comment pertaining to disk/partition stat locking

* tag 'block-6.18-20251009' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  loop: remove redundant __GFP_NOWARN flag
  block: move bio_iov_iter_get_bdev_pages to block/fops.c
  iomap: open code bio_iov_iter_get_bdev_pages
  block: rename bio_iov_iter_get_pages_aligned to bio_iov_iter_get_pages
  block: remove bio_iov_iter_get_pages
  block: Update a comment of disk statistics
  loop: fix backing file reference leak on validation error
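For context on the first fix: in current mainline, GFP_NOWAIT already carries __GFP_NOWARN, so OR-ing the flag in again at the call site was a no-op. A rough sketch of why the flag is redundant (paraphrased; the exact flag composition may differ between kernel versions):

	/* Paraphrased from include/linux/gfp_types.h (may vary by version): */
	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

	/* So in drivers/block/loop.c: */
	kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);	/* before: __GFP_NOWARN redundant */
	kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);			/* after: identical behaviour */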
-rw-r--r--  block/bio.c                 5
-rw-r--r--  block/blk-map.c             6
-rw-r--r--  block/fops.c               13
-rw-r--r--  drivers/block/loop.c       10
-rw-r--r--  fs/iomap/direct-io.c        3
-rw-r--r--  include/linux/bio.h         7
-rw-r--r--  include/linux/blkdev.h      7
-rw-r--r--  include/linux/part_stat.h   4
8 files changed, 29 insertions, 26 deletions
diff --git a/block/bio.c b/block/bio.c
index 3a1a848940dd..b3a79285c278 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1316,7 +1316,7 @@ static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
}
/**
- * bio_iov_iter_get_pages_aligned - add user or kernel pages to a bio
+ * bio_iov_iter_get_pages - add user or kernel pages to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be added
* @len_align_mask: the mask to align the total size to, 0 for any length
@@ -1336,7 +1336,7 @@ static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
* MM encounters an error pinning the requested pages, it stops. Error
* is returned only if 0 pages could be pinned.
*/
-int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
unsigned len_align_mask)
{
int ret = 0;
@@ -1360,7 +1360,6 @@ int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
return bio_iov_iter_align_down(bio, iter, len_align_mask);
return ret;
}
-EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages_aligned);
static void submit_bio_wait_endio(struct bio *bio)
{
diff --git a/block/blk-map.c b/block/blk-map.c
index 165f2234f00f..60faf036fb6e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -283,7 +283,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
if (!bio)
return -ENOMEM;
- ret = bio_iov_iter_get_pages(bio, iter);
+ /*
+ * No alignment requirements on our part to support arbitrary
+ * passthrough commands.
+ */
+ ret = bio_iov_iter_get_pages(bio, iter, 0);
if (ret)
goto out_put;
ret = blk_rq_append_bio(rq, bio);
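Passing 0 here preserves the behaviour of the inline wrapper removed from include/linux/bio.h later in this series, which simply forwarded a zero alignment mask (i.e. no rounding of the total length) to the aligned variant:

	/* The removed wrapper, for comparison -- passing 0 directly is equivalent: */
	static inline int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
	{
		return bio_iov_iter_get_pages_aligned(bio, iter, 0);
	}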
diff --git a/block/fops.c b/block/fops.c
index c2c0396ea9ee..5e3db9fead77 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -43,6 +43,13 @@ static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
(bdev_logical_block_size(bdev) - 1);
}
+static inline int blkdev_iov_iter_get_pages(struct bio *bio,
+ struct iov_iter *iter, struct block_device *bdev)
+{
+ return bio_iov_iter_get_pages(bio, iter,
+ bdev_logical_block_size(bdev) - 1);
+}
+
#define DIO_INLINE_BIO_VECS 4
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
@@ -78,7 +85,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_ATOMIC)
bio.bi_opf |= REQ_ATOMIC;
- ret = bio_iov_iter_get_bdev_pages(&bio, iter, bdev);
+ ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
if (unlikely(ret))
goto out;
ret = bio.bi_iter.bi_size;
@@ -212,7 +219,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
+ ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
if (unlikely(ret)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
@@ -348,7 +355,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
*/
bio_iov_bvec_set(bio, iter);
} else {
- ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
+ ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
if (unlikely(ret))
goto out_bio_put;
}
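The new blkdev_iov_iter_get_pages() helper mirrors the bio_iov_iter_get_bdev_pages() wrapper removed from blkdev.h below: the mask is the logical block size minus one, so the total length added to the bio is rounded down to a block-size multiple. A rough sketch of the arithmetic, with example values (not code from the patch):

	unsigned lbs  = 512;			/* bdev_logical_block_size(bdev)	*/
	unsigned mask = lbs - 1;		/* 0x1ff				*/
	size_t   got  = 4097;			/* bytes the iterator could provide	*/
	size_t   kept = got & ~(size_t)mask;	/* 4096: trimmed to a full block	*/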
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 053a086d547e..13ce229d450c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -551,8 +551,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -822,7 +824,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
if (worker)
goto queue_work;
- worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
@@ -993,8 +995,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
is_loop = is_loop_device(file);
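Both loop.c hunks fix the same leak: the backing file is obtained with an extra reference before validation, so any early return after that point must drop it. The fixed shape of the error path, in outline (abridged from the surrounding code):

	file = fget(arg);			/* takes a reference on the backing file */
	if (!file)
		return -EBADF;

	error = loop_check_backing_file(file);
	if (error) {
		fput(file);			/* was missing: reference leaked on failure */
		return error;
	}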
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 9802b2cc29bb..5d5d63efbd57 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -433,7 +433,8 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- ret = bio_iov_iter_get_bdev_pages(bio, dio->submit.iter, iomap->bdev);
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
+ bdev_logical_block_size(iomap->bdev) - 1);
if (unlikely(ret)) {
/*
* We have to stop part way through an IO. We must fall
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a64a30131031..16c1c85613b7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -446,14 +446,9 @@ int submit_bio_wait(struct bio *bio);
int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
size_t len, enum req_op op);
-int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
unsigned len_align_mask);
-static inline int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
-{
- return bio_iov_iter_get_pages_aligned(bio, iter, 0);
-}
-
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index dad5cb5b3812..70b671a9a7f7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1873,13 +1873,6 @@ static inline int bio_split_rw_at(struct bio *bio,
return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}
-static inline int bio_iov_iter_get_bdev_pages(struct bio *bio,
- struct iov_iter *iter, struct block_device *bdev)
-{
- return bio_iov_iter_get_pages_aligned(bio, iter,
- bdev_logical_block_size(bdev) - 1);
-}
-
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index eeeff2a04529..729415e91215 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -17,8 +17,8 @@ struct disk_stats {
/*
* Macros to operate on percpu disk statistics:
*
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should
- * be called between disk_stat_lock() and disk_stat_unlock().
+ * part_stat_{add|sub|inc|dec}() modify the stat counters and should
+ * be called between part_stat_lock() and part_stat_unlock().
*
* part_stat_read() can be called at any time.
*/
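The updated comment reflects the macro names that actually exist today. A typical usage pattern, as a sketch (the block_device pointer and stat field are illustrative):

	part_stat_lock();
	part_stat_inc(part, ios[STAT_READ]);	/* modify a per-cpu counter under the lock */
	part_stat_unlock();

	/* Reads need no locking: */
	unsigned long reads = part_stat_read(part, ios[STAT_READ]);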