author    Christoph Hellwig <hch@lst.de>  2025-03-10 14:19:11 +0100
committer Carlos Maiolino <cem@kernel.org>  2025-03-10 14:29:44 +0100
commit    a2f790b28512c22f7cf4f420a99e1b008e7098fe
tree      6f9260ba5e69d202a73573d80b3ae8c5defa092c /fs/xfs/xfs_buf.c
parent    94c78cfa3bd1858eec3c462aedeb7f297d38d768
xfs: kill XBF_UNMAPPED
Unmapped buffer access is a pain, so kill it. The switch to large folios
means we rarely pay a vmap penalty for large buffers, so this
functionality is largely unnecessary now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--  fs/xfs/xfs_buf.c | 58
1 file changed, 4 insertions(+), 54 deletions(-)
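
To make the simplification concrete, here is a minimal sketch (illustrative only, not part of the patch; the helper names are hypothetical) contrasting the two access paths this change collapses. With XBF_UNMAPPED, callers had to resolve every byte offset through the buffer's page array; without it, every buffer carries a contiguous kernel mapping in bp->b_addr, so plain pointer arithmetic suffices. This mirrors the xfs_buf_offset() implementation removed in the diff below.

/*
 * Illustrative sketch only, not part of the patch; helper names are
 * hypothetical. buf_offset_unmapped() mirrors the page-walking logic of
 * the xfs_buf_offset() implementation removed in the diff below;
 * buf_offset_mapped() is what an offset lookup reduces to once every
 * buffer has a valid contiguous mapping in bp->b_addr.
 */
static void *buf_offset_unmapped(struct xfs_buf *bp, size_t offset)
{
	/* Unmapped: find the backing page, then index into it. */
	struct page *page = bp->b_pages[offset >> PAGE_SHIFT];

	return page_address(page) + (offset & (PAGE_SIZE - 1));
}

static void *buf_offset_mapped(struct xfs_buf *bp, size_t offset)
{
	/* Mapped: bp->b_addr covers the whole buffer contiguously. */
	return bp->b_addr + offset;
}
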
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a831e8c755cb..b5ec7d83210f 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -145,7 +145,7 @@ _xfs_buf_alloc(
* We don't want certain flags to appear in b_flags unless they are
* specifically set by later operations on the buffer.
*/
- flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+ flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
/*
* A new buffer is held and locked by the owner. This ensures that the
@@ -289,9 +289,7 @@ xfs_buf_alloc_kmem(
*
* The third type of buffer is the multi-page buffer. These are always made
* up of single pages so that they can be fed to vmap_ram() to return a
- * contiguous memory region we can access the data through, or mark it as
- * XBF_UNMAPPED and access the data directly through individual page_address()
- * calls.
+ * contiguous memory region we can access the data through.
*/
static int
xfs_buf_alloc_backing_mem(
@@ -413,8 +411,6 @@ _xfs_buf_map_pages(
if (bp->b_page_count == 1) {
/* A single page buffer is always mappable */
bp->b_addr = page_address(bp->b_pages[0]);
- } else if (flags & XBF_UNMAPPED) {
- bp->b_addr = NULL;
} else {
int retried = 0;
unsigned nofs_flag;
@@ -1345,7 +1341,7 @@ __xfs_buf_ioend(
trace_xfs_buf_iodone(bp, _RET_IP_);
if (bp->b_flags & XBF_READ) {
- if (!bp->b_error && bp->b_addr && is_vmalloc_addr(bp->b_addr))
+ if (!bp->b_error && is_vmalloc_addr(bp->b_addr))
invalidate_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
if (!bp->b_error && bp->b_ops)
@@ -1526,7 +1522,7 @@ xfs_buf_submit_bio(
__bio_add_page(bio, bp->b_pages[p], PAGE_SIZE, 0);
bio->bi_iter.bi_size = size; /* limit to the actual size used */
- if (bp->b_addr && is_vmalloc_addr(bp->b_addr))
+ if (is_vmalloc_addr(bp->b_addr))
flush_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
}
@@ -1657,52 +1653,6 @@ xfs_buf_submit(
xfs_buf_submit_bio(bp);
}
-void *
-xfs_buf_offset(
- struct xfs_buf *bp,
- size_t offset)
-{
- struct page *page;
-
- if (bp->b_addr)
- return bp->b_addr + offset;
-
- page = bp->b_pages[offset >> PAGE_SHIFT];
- return page_address(page) + (offset & (PAGE_SIZE-1));
-}
-
-void
-xfs_buf_zero(
- struct xfs_buf *bp,
- size_t boff,
- size_t bsize)
-{
- size_t bend;
-
- if (bp->b_addr) {
- memset(bp->b_addr + boff, 0, bsize);
- return;
- }
-
- bend = boff + bsize;
- while (boff < bend) {
- struct page *page;
- int page_index, page_offset, csize;
-
- page_index = boff >> PAGE_SHIFT;
- page_offset = boff & ~PAGE_MASK;
- page = bp->b_pages[page_index];
- csize = min_t(size_t, PAGE_SIZE - page_offset,
- BBTOB(bp->b_length) - boff);
-
- ASSERT((csize + page_offset) <= PAGE_SIZE);
-
- memset(page_address(page) + page_offset, 0, csize);
-
- boff += csize;
- }
-}
-
/*
* Log a message about and stale a buffer that a caller has decided is corrupt.
*
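
The same invariant drives the other hunks above: __xfs_buf_ioend() and xfs_buf_submit_bio() drop their "bp->b_addr &&" guards because b_addr can never be NULL once unmapped buffers are gone, and the removed xfs_buf_zero() page loop collapses to its former mapped-buffer fast path. A hedged sketch of what the zeroing helper reduces to (the patch may define the actual replacement elsewhere, e.g. as an inline in a header, which this diff does not show):

/*
 * Hedged sketch: with every buffer mapped, zeroing a sub-range no longer
 * needs the per-page loop removed above. This is simply the mapped-buffer
 * fast path the old xfs_buf_zero() already had; where the patch actually
 * places the replacement is not visible in this diff.
 */
static inline void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize)
{
	memset(bp->b_addr + boff, 0, bsize);
}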