author		Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2015-06-24 16:56:59 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-24 17:49:42 -0700
commit		641844f5616d7c6597309f560838f996466d7aac (patch)
tree		1b9fff7ae2ba5ec92959563c52a144c01735341a /mm/hugetlb.c
parent		414e2fb8ce5a999571c21eb2ca4d66e53ddce800 (diff)
mm/hugetlb: introduce minimum hugepage order
Currently the initial value of order in dissolve_free_huge_pages() is 64 or
32, which leads to the following warning from a static checker:
mm/hugetlb.c:1203 dissolve_free_huge_pages()
warn: potential right shift more than type allows '9,18,64'
This is a potential risk of an infinite loop, because 1 << order (== 0) is
used in a for loop like this:

    for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
            ...
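For illustration only (this is not kernel code, and the pfn range below is
made up), a minimal user-space sketch of how a zero step turns such a loop
into an infinite one:

#include <stdio.h>

int main(void)
{
	unsigned long start_pfn = 0x1000, end_pfn = 0x2000;
	unsigned long step = 0;	/* what "1 << order" can degenerate to once
				 * order reaches the type's bit width */
	unsigned long pfn, iterations = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += step) {
		if (++iterations > 3) {	/* bail out instead of spinning forever */
			printf("pfn stuck at %#lx after %lu iterations\n",
			       pfn, iterations);
			return 1;
		}
	}
	return 0;
}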
So this patch fixes it by using a global minimum_order calculated once at
boot time.
   text    data     bss     dec     hex filename
  28313     469   84236  113018   1b97a mm/hugetlb.o
  28256     473   84236  112965   1b945 mm/hugetlb.o (patched)
Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 716465ae57aa..10de25cf1f99 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+/*
+ * Minimum page order among possible hugepage sizes, set to a proper value
+ * at boot time.
+ */
+static unsigned int minimum_order __read_mostly = UINT_MAX;
 
 __initdata LIST_HEAD(huge_boot_pages);
 
@@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned int order = 8 * sizeof(void *);
 	unsigned long pfn;
-	struct hstate *h;
 
 	if (!hugepages_supported())
 		return;
 
-	/* Set scan step to minimum hugepage size */
-	for_each_hstate(h)
-		if (order > huge_page_order(h))
-			order = huge_page_order(h);
-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
 		dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
@@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
 	struct hstate *h;
 
 	for_each_hstate(h) {
+		if (minimum_order > huge_page_order(h))
+			minimum_order = huge_page_order(h);
+
 		/* oversize hugepages were init'ed in early boot */
 		if (!hstate_is_gigantic(h))
 			hugetlb_hstate_alloc_pages(h);
 	}
+	VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)
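As a usage illustration, here is a minimal user-space sketch of the pattern
the patch adopts; hstate_orders and init_minimum_order() are hypothetical
stand-ins for the kernel's hstates and hugetlb_init_hstates(), not real
kernel APIs:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Made-up stand-ins for the kernel's hstates: the page orders of 2MB and
 * 1GB hugepages with 4KB base pages on x86-64 (orders 9 and 18). */
static const unsigned int hstate_orders[] = { 9, 18 };

static unsigned int minimum_order = UINT_MAX;

static void init_minimum_order(void)
{
	size_t i;

	/* Track the smallest order once, at "boot" time. */
	for (i = 0; i < sizeof(hstate_orders) / sizeof(hstate_orders[0]); i++)
		if (minimum_order > hstate_orders[i])
			minimum_order = hstate_orders[i];
	assert(minimum_order != UINT_MAX);	/* mirrors the patch's VM_BUG_ON */
}

int main(void)
{
	init_minimum_order();
	/* Scan step in base pages: 1 << 9 = 512 pages = 2MB here. */
	printf("minimum_order = %u, scan step = %lu pages\n",
	       minimum_order, 1UL << minimum_order);
	return 0;
}

Computing the minimum once at init time avoids rescanning the hstates on
every call and removes the bogus 8 * sizeof(void *) starting value entirely.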