author	Christoph Hellwig <hch@lst.de>	2025-01-03 08:33:57 +0100
committer	Jens Axboe <axboe@kernel.dk>	2025-01-04 15:27:35 -0700
commit	6aeb4f836480617be472de767c4cb09c1060a067 (patch)
tree	3c8f7a90d542898614b1fbaa7b6f407045166e7c /include/linux
parent	c2398e6d5f16e15598d3a37e17107fea477e3f91 (diff)
block: remove bio_add_pc_page
Lift bio_split_rw_at into blk_rq_append_bio so that it validates the hardware limits. With this, all passthrough callers can simply use bio_add_page to build the bio and delay checking for exceeded limits to this point instead of doing it for each page. While this looks like adding a new expensive loop over all bio_vecs, blk_rq_append_bio already iterates over them just to count the number of segments.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Link: https://lore.kernel.org/r/20250103073417.459715-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
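For illustration, a minimal sketch of what a passthrough caller can look like after this change. The helper name example_map_pages, the way the pages are obtained, and the error handling are assumptions for the sketch, not part of the patch; only bio_alloc, bio_add_page, blk_rq_append_bio and bio_put are real interfaces.

#include <linux/bio.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical passthrough helper: build a bio from already-pinned pages
 * with plain bio_add_page() and let blk_rq_append_bio() validate the
 * hardware limits once over the finished bio.
 */
static int example_map_pages(struct request *rq, struct page **pages,
		unsigned int nr_pages, unsigned int len_per_page)
{
	struct bio *bio;
	unsigned int i;
	int ret;

	bio = bio_alloc(NULL, nr_pages, req_op(rq), GFP_KERNEL);
	if (!bio)
		return -ENOMEM;

	for (i = 0; i < nr_pages; i++) {
		/* no per-page queue limit check needed while building */
		if (!bio_add_page(bio, pages[i], len_per_page, 0)) {
			ret = -EINVAL;
			goto out_put;
		}
	}

	/* segment count and other hardware limits are checked here */
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_put;
	return 0;

out_put:
	bio_put(bio);
	return ret;
}

The point of the sketch: the per-page bio_add_pc_page-style limit checks disappear from the caller, and the single validation pass in blk_rq_append_bio reuses the loop it already needs for counting segments.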
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/bio.h	|  2 --
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1eec59699100..4b79bf50f4f0 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -413,8 +413,6 @@ int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
 		unsigned off);
 bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
 		size_t len, size_t off);
-extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
-		unsigned int, unsigned int);
 void __bio_add_page(struct bio *bio, struct page *page,
 		unsigned int len, unsigned int off);
 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,