summaryrefslogtreecommitdiff
path: root/block
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2026-03-27 10:00:32 +0100
committerJens Axboe <axboe@kernel.dk>2026-04-09 19:14:58 -0600
commit539fb773a3f7c07cf7fd00617f33ed4e33058d72 (patch)
treea166a72891ab46794861eb87963b2d9544b581bc /block
parentb774765fb804045ee774476ded8e52482ae5ecb7 (diff)
block: refactor blkdev_zone_mgmt_ioctl
Split the zone reset case into a separate helper so that the conditional locking goes away.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://patch.msgid.link/20260327090032.3722065-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk-zoned.c41
1 file changed, 18 insertions, 23 deletions
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index bfd9733ebd31..30cad2bb9291 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -417,20 +417,32 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
return 0;
}
-static int blkdev_truncate_zone_range(struct block_device *bdev,
- blk_mode_t mode, const struct blk_zone_range *zrange)
+static int blkdev_reset_zone(struct block_device *bdev, blk_mode_t mode,
+ struct blk_zone_range *zrange)
{
loff_t start, end;
+ int ret = -EINVAL;
+ inode_lock(bdev->bd_mapping->host);
+ filemap_invalidate_lock(bdev->bd_mapping);
if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
/* Out of range */
- return -EINVAL;
+ goto out_unlock;
start = zrange->sector << SECTOR_SHIFT;
end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
- return truncate_bdev_range(bdev, mode, start, end);
+ ret = truncate_bdev_range(bdev, mode, start, end);
+ if (ret)
+ goto out_unlock;
+
+ ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zrange->sector,
+ zrange->nr_sectors);
+out_unlock:
+ filemap_invalidate_unlock(bdev->bd_mapping);
+ inode_unlock(bdev->bd_mapping->host);
+ return ret;
}
/*
@@ -443,7 +455,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
void __user *argp = (void __user *)arg;
struct blk_zone_range zrange;
enum req_op op;
- int ret;
if (!argp)
return -EINVAL;
@@ -459,15 +470,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
switch (cmd) {
case BLKRESETZONE:
- op = REQ_OP_ZONE_RESET;
-
- /* Invalidate the page cache, including dirty pages. */
- inode_lock(bdev->bd_mapping->host);
- filemap_invalidate_lock(bdev->bd_mapping);
- ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
- if (ret)
- goto fail;
- break;
+ return blkdev_reset_zone(bdev, mode, &zrange);
case BLKOPENZONE:
op = REQ_OP_ZONE_OPEN;
break;
@@ -481,15 +484,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY;
}
- ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
-
-fail:
- if (cmd == BLKRESETZONE) {
- filemap_invalidate_unlock(bdev->bd_mapping);
- inode_unlock(bdev->bd_mapping->host);
- }
-
- return ret;
+ return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
}
static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)