author     Ganesh Mahendran <opensource.ganesh@gmail.com>    2016-07-28 15:47:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 16:07:41 -0700
commit     b4fd07a0864a06d7a8b20a624d851736330d6fd8 (patch)
tree       9f14c7f02483f2a6e9aa2c24beb0649e1f320d66 /mm
parent     cf675acb743f045b7482384be60987e69260e43d (diff)
mm/zsmalloc: use class->objs_per_zspage to get num of max objects
The maximum number of objects in a zspage is now stored in each size_class,
so there is no need to re-calculate it.
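
[ For reference: the cached field and the helper compute the same value. A
minimal userspace sketch of the relation follows; the helper body mirrors
the computation visible in the diff below, while the demo main(), the
example class geometry, and the 4 KiB PAGE_SIZE are illustrative
assumptions. ]

#include <stdio.h>

#define PAGE_SIZE 4096	/* assumed 4 KiB pages for the demo */

/* Mirrors get_maxobj_per_zspage() in mm/zsmalloc.c: how many objects
 * of a given size fit into a zspage of pages_per_zspage pages. */
static int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

int main(void)
{
	/* Example class geometry: 208-byte objects, 2-page zspages. */
	int size = 208, pages_per_zspage = 2;

	/* zs_create_pool() computes this value once and caches it in
	 * class->objs_per_zspage; hot paths such as zs_malloc() and
	 * zs_can_compact() can then read the field instead of redoing
	 * the division. */
	printf("objs_per_zspage = %d\n",
	       get_maxobj_per_zspage(size, pages_per_zspage));
	return 0;
}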
Link: http://lkml.kernel.org/r/1467882338-4300-3-git-send-email-opensource.ganesh@gmail.com
Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/zsmalloc.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 49143de9934c..72e0b296984b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -635,8 +635,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
 		freeable = zs_can_compact(class);
 		spin_unlock(&class->lock);
 
-		objs_per_zspage = get_maxobj_per_zspage(class->size,
-				class->pages_per_zspage);
+		objs_per_zspage = class->objs_per_zspage;
 		pages_used = obj_allocated / objs_per_zspage *
 				class->pages_per_zspage;
 
@@ -1014,8 +1013,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 
 	cache_free_zspage(pool, zspage);
 
-	zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-			class->size, class->pages_per_zspage));
+	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
 	atomic_long_sub(class->pages_per_zspage,
 				&pool->pages_allocated);
 }
@@ -1366,7 +1364,7 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
 	if (prev->pages_per_zspage != pages_per_zspage)
 		return false;
 
-	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
+	if (prev->objs_per_zspage
 		!= get_maxobj_per_zspage(size, pages_per_zspage))
 		return false;
 
@@ -1592,8 +1590,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage,
 				&pool->pages_allocated);
-	zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-			class->size, class->pages_per_zspage));
+	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
@@ -2265,8 +2262,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 		return 0;
 
 	obj_wasted = obj_allocated - obj_used;
-	obj_wasted /= get_maxobj_per_zspage(class->size,
-			class->pages_per_zspage);
+	obj_wasted /= class->objs_per_zspage;
 
 	return obj_wasted * class->pages_per_zspage;
 }
@@ -2473,8 +2469,8 @@ struct zs_pool *zs_create_pool(const char *name)
 		class->size = size;
 		class->index = i;
 		class->pages_per_zspage = pages_per_zspage;
-		class->objs_per_zspage = class->pages_per_zspage *
-			PAGE_SIZE / class->size;
+		class->objs_per_zspage = get_maxobj_per_zspage(class->size,
+							class->pages_per_zspage);
 		spin_lock_init(&class->lock);
 		pool->size_class[i] = class;
 		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
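
[ Worked example of the zs_can_compact() hunk above: the same arithmetic in
a standalone sketch. Only the formula is taken from the patch; the class
statistics are made up for illustration. ]

#include <stdio.h>

int main(void)
{
	/* Hypothetical size_class statistics: */
	unsigned long obj_allocated = 500;	/* slots in existing zspages */
	unsigned long obj_used = 420;		/* slots actually in use */
	int objs_per_zspage = 39;	/* read from class->objs_per_zspage */
	int pages_per_zspage = 2;

	/* Unused slots, rounded down to whole zspages ... */
	unsigned long obj_wasted = (obj_allocated - obj_used) / objs_per_zspage;

	/* ... give the number of pages compaction could reclaim. */
	printf("compactable pages = %lu\n", obj_wasted * pages_per_zspage);
	return 0;
}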