author		Tejun Heo <tj@kernel.org>	2015-08-18 14:55:11 -0700
committer	Jens Axboe <axboe@fb.com>	2015-08-18 15:49:16 -0700
commit		001bea73e70efdf48a9e00188cf302f6b6aed2bf
tree		73797e7ac03b5e3e4fd9a3dc90c43f89b1ed57e0 /block
parent		3e41871046bfe0ba7d122a1f14f0c1db2dca0256
blkcg: replace blkcg_policy->pd_size with ->pd_alloc/free_fn() methods
A blkg (blkcg_gq) represents the relationship between a cgroup and a
request_queue. Each active policy has a pd (blkg_policy_data) on each
blkg. The pds were allocated by blkcg core, and each policy could
request extra space at the end by setting blkcg_policy->pd_size larger
than the size of pd.
This is a bit unusual, but it was done this way mostly to simplify
error handling, and all the existing use cases could be handled that
way; however, it is becoming too restrictive now that percpu memory
can be allocated without blocking.
This introduces two new mandatory blkcg_policy methods - pd_alloc_fn()
and pd_free_fn() - which are used to allocate and release the pd for a
given policy. As pd allocation is now done on the policy side, a policy
can simply allocate a larger area that embeds the pd at the beginning.
This change makes ->pd_size pointless. Removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
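
As a rough illustration of the new contract (not part of this commit; the
"foo" names and the private field are invented for this sketch), a policy
now embeds the generic blkg_policy_data at the start of its own per-blkg
struct and hands blkcg core a pointer to that member, mirroring what the
blk-throttle and cfq-iosched hunks below do:

#include <linux/slab.h>
#include <linux/blk-cgroup.h>

/* hypothetical per-blkg data for an imaginary policy "foo" */
struct foo_grp {
	struct blkg_policy_data pd;	/* must be the first member */
	u64 foo_private;		/* policy-private state */
};

static struct blkg_policy_data *foo_pd_alloc(gfp_t gfp, int node)
{
	struct foo_grp *fg = kzalloc_node(sizeof(*fg), gfp, node);

	/* blkcg core only ever sees the embedded pd */
	return fg ? &fg->pd : NULL;
}

static void foo_pd_free(struct blkg_policy_data *pd)
{
	kfree(container_of(pd, struct foo_grp, pd));
}

static struct blkcg_policy blkcg_policy_foo = {
	.pd_alloc_fn	= foo_pd_alloc,
	.pd_free_fn	= foo_pd_free,
	/* .pd_init_fn, .pd_exit_fn, etc. unchanged by this scheme */
};

Because the pd is the first member, the policies converted in this patch
just kfree() the pd pointer directly; the container_of() spelling above is
the equivalent general-purpose form.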
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	21
-rw-r--r--	block/blk-throttle.c	13
-rw-r--r--	block/cfq-iosched.c	13

3 files changed, 35 insertions, 12 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4defbbabc0ff..d1bc6099bd1e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -68,7 +68,8 @@ static void blkg_free(struct blkcg_gq *blkg)
 		return;
 
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
-		kfree(blkg->pd[i]);
+		if (blkg->pd[i])
+			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->blkcg != &blkcg_root)
 		blk_exit_rl(&blkg->rl);
@@ -114,7 +115,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
-		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
+		pd = pol->pd_alloc_fn(gfp_mask, q->node);
 		if (!pd)
 			goto err_free;
 
@@ -1057,7 +1058,7 @@ int blkcg_activate_policy(struct request_queue *q,
 		blk_queue_bypass_start(q);
 pd_prealloc:
 	if (!pd_prealloc) {
-		pd_prealloc = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
 		if (!pd_prealloc) {
 			ret = -ENOMEM;
 			goto out_bypass_end;
@@ -1072,7 +1073,7 @@ pd_prealloc:
 		if (blkg->pd[pol->plid])
 			continue;
 
-		pd = kzalloc_node(pol->pd_size, GFP_NOWAIT, q->node);
+		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
@@ -1093,7 +1094,8 @@ pd_prealloc:
 	spin_unlock_irq(q->queue_lock);
 out_bypass_end:
 	blk_queue_bypass_end(q);
-	kfree(pd_prealloc);
+	if (pd_prealloc)
+		pol->pd_free_fn(pd_prealloc);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1128,8 +1130,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		if (pol->pd_exit_fn)
 			pol->pd_exit_fn(blkg);
 
-		kfree(blkg->pd[pol->plid]);
-		blkg->pd[pol->plid] = NULL;
+		if (blkg->pd[pol->plid]) {
+			pol->pd_free_fn(blkg->pd[pol->plid]);
+			blkg->pd[pol->plid] = NULL;
+		}
 
 		spin_unlock(&blkg->blkcg->lock);
 	}
@@ -1151,9 +1155,6 @@ int blkcg_policy_register(struct blkcg_policy *pol)
 	struct blkcg *blkcg;
 	int i, ret;
 
-	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
-		return -EINVAL;
-
 	mutex_lock(&blkcg_pol_register_mutex);
 	mutex_lock(&blkcg_pol_mutex);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index b23193518ac7..f1dd691c5359 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -403,6 +403,11 @@ static void throtl_service_queue_exit(struct throtl_service_queue *sq)
 	del_timer_sync(&sq->pending_timer);
 }
 
+static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
+{
+	return kzalloc_node(sizeof(struct throtl_grp), gfp, node);
+}
+
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -493,6 +498,11 @@ static void throtl_pd_exit(struct blkcg_gq *blkg)
 	throtl_service_queue_exit(&tg->service_queue);
 }
 
+static void throtl_pd_free(struct blkg_policy_data *pd)
+{
+	kfree(pd);
+}
+
 static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
@@ -1468,12 +1478,13 @@ static void throtl_shutdown_wq(struct request_queue *q)
 }
 
 static struct blkcg_policy blkcg_policy_throtl = {
-	.pd_size		= sizeof(struct throtl_grp),
 	.cftypes		= throtl_files,
 
+	.pd_alloc_fn		= throtl_pd_alloc,
 	.pd_init_fn		= throtl_pd_init,
 	.pd_online_fn		= throtl_pd_online,
 	.pd_exit_fn		= throtl_pd_exit,
+	.pd_free_fn		= throtl_pd_free,
 	.pd_reset_stats_fn	= throtl_pd_reset_stats,
 };
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9c9ec7cc9f99..69ce2883099e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1582,6 +1582,11 @@ static void cfq_cpd_init(const struct blkcg *blkcg)
 	}
 }
 
+static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
+{
+	return kzalloc_node(sizeof(struct cfq_group), gfp, node);
+}
+
 static void cfq_pd_init(struct blkcg_gq *blkg)
 {
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
@@ -1618,6 +1623,11 @@ static void cfq_pd_offline(struct blkcg_gq *blkg)
 		cfqg_stats_xfer_dead(cfqg);
 }
 
+static void cfq_pd_free(struct blkg_policy_data *pd)
+{
+	return kfree(pd);
+}
+
 /* offset delta from cfqg->stats to cfqg->dead_stats */
 static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
 					offsetof(struct cfq_group, stats);
@@ -4633,13 +4643,14 @@ static struct elevator_type iosched_cfq = {
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 static struct blkcg_policy blkcg_policy_cfq = {
-	.pd_size		= sizeof(struct cfq_group),
 	.cpd_size		= sizeof(struct cfq_group_data),
 	.cftypes		= cfq_blkcg_files,
 
 	.cpd_init_fn		= cfq_cpd_init,
+	.pd_alloc_fn		= cfq_pd_alloc,
 	.pd_init_fn		= cfq_pd_init,
 	.pd_offline_fn		= cfq_pd_offline,
+	.pd_free_fn		= cfq_pd_free,
 	.pd_reset_stats_fn	= cfq_pd_reset_stats,
 };
 #endif
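
For readers following the blkcg_activate_policy() hunks above, the retry
logic they modify can be summarized as the sketch below. This is a
simplified restatement, not the verbatim kernel function: the function name
is invented, and queue bypass, pd_init_fn() invocation and error unwinding
are omitted. The idea is to preallocate one pd with GFP_KERNEL outside the
queue lock, then attempt GFP_NOWAIT allocations per blkg under the lock and
fall back to the preallocated one when they fail.

#include <linux/blkdev.h>
#include <linux/blk-cgroup.h>

static int activate_policy_sketch(struct request_queue *q,
				  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;

pd_prealloc:
	if (!pd_prealloc) {
		/* may sleep: done outside the queue lock */
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc)
			return -ENOMEM;
	}

	spin_lock_irq(q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* atomic attempt first, fall back to the preallocated pd */
		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			/* both failed: drop the lock, refill, rescan */
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
	}
	spin_unlock_irq(q->queue_lock);

	/* release an unused spare with the matching policy callback */
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return 0;
}

Making the policy allocate the pd itself is what lets this pattern keep
working once policies start embedding percpu or otherwise specially
allocated members, which is the motivation stated in the commit message.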