diff options
-rw-r--r--  block/blk-settings.c   | 26
-rw-r--r--  drivers/md/dm-table.c  |  5
-rw-r--r--  include/linux/blkdev.h |  1
3 files changed, 23 insertions, 9 deletions
| diff --git a/block/blk-settings.c b/block/blk-settings.c index e55f5fc4ca22..36c8c1f2af18 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)  EXPORT_SYMBOL(blk_queue_bounce_limit);  /** - * blk_queue_max_hw_sectors - set max sectors for a request for this queue - * @q:  the request queue for the device + * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request + * @limits: the queue limits   * @max_hw_sectors:  max hardware sectors in the usual 512b unit   *   * Description: @@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);   *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.   *    The soft limit can not exceed max_hw_sectors.   **/ -void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) +void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)  {  	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {  		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); @@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto  		       __func__, max_hw_sectors);  	} -	q->limits.max_hw_sectors = max_hw_sectors; -	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors, -				      BLK_DEF_MAX_SECTORS); +	limits->max_hw_sectors = max_hw_sectors; +	limits->max_sectors = min_t(unsigned int, max_hw_sectors, +				    BLK_DEF_MAX_SECTORS); +} +EXPORT_SYMBOL(blk_limits_max_hw_sectors); + +/** + * blk_queue_max_hw_sectors - set max sectors for a request for this queue + * @q:  the request queue for the device + * @max_hw_sectors:  max hardware sectors in the usual 512b unit + * + * Description: + *    See description for blk_limits_max_hw_sectors(). 
+ **/ +void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) +{ +	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);  }  EXPORT_SYMBOL(blk_queue_max_hw_sectors); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e2da1912a2cb..4d705cea0f8c 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -517,9 +517,8 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,  	 */  	if (q->merge_bvec_fn && !ti->type->merge) -		limits->max_sectors = -			min_not_zero(limits->max_sectors, -				     (unsigned int) (PAGE_SIZE >> 9)); +		blk_limits_max_hw_sectors(limits, +					  (unsigned int) (PAGE_SIZE >> 9));  	return 0;  }  EXPORT_SYMBOL_GPL(dm_set_device_limits); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 95aeeeb49e8b..36ab42c9bb99 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -808,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,  extern void blk_cleanup_queue(struct request_queue *);  extern void blk_queue_make_request(struct request_queue *, make_request_fn *);  extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);  extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);  extern void blk_queue_max_segments(struct request_queue *, unsigned short);  extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 
