path: root/mm/memory_hotplug.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-19 09:45:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-19 09:45:58 -0700
commit     249be8511b269495bc95cb8bdfdd5840b2ba73c0 (patch)
tree       6920bde053faa0284b52b2a9c9695f5516520377 /mm/memory_hotplug.c
parent     3bfe1fc46794631366faa3ef075e1b0ff7ba120a (diff)
parent     eec4844fae7c033a0c1fc1eb3b8517aeb8b6cc49 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
 "The rest of MM and a kernel-wide procfs cleanup.

  Summary of the more significant patches:

   - Patch series "mm/memory_hotplug: Factor out memory block device
     handling", v3. David Hildenbrand.

     Some spring-cleaning of the memory hotplug code, notably in
     drivers/base/memory.c

   - "mm: thp: fix false negative of shmem vma's THP eligibility". Yang
     Shi. Fix /proc/pid/smaps output for THP pages used in shmem.

   - "resource: fix locking in find_next_iomem_res()" + 1. Nadav Amit.
     Bugfix and speedup for kernel/resource.c

   - Patch series "mm: Further memory block device cleanups", David
     Hildenbrand. More spring-cleaning of the memory hotplug code.

   - Patch series "mm: Sub-section memory hotplug support". Dan
     Williams. Generalise the memory hotplug code so that pmem can use
     it more completely. Then remove the hacks from the libnvdimm code
     which were there to work around the memory-hotplug code's
     constraints.

   - "proc/sysctl: add shared variables for range check", Matteo Croce.
     We have about 250 instances of "int zero; ... .extra1 = &zero," in
     the tree. This is a tree-wide sweep to make all those private
     "zero"s and "one"s use global variables. Alas, it isn't practical
     to make those two global integers const" (a short sketch of the
     new pattern follows this message).

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (38 commits)
  proc/sysctl: add shared variables for range check
  mm: migrate: remove unused mode argument
  mm/sparsemem: cleanup 'section number' data types
  libnvdimm/pfn: stop padding pmem namespaces to section alignment
  libnvdimm/pfn: fix fsdax-mode namespace info-block zero-fields
  mm/devm_memremap_pages: enable sub-section remap
  mm: document ZONE_DEVICE memory-model implications
  mm/sparsemem: support sub-section hotplug
  mm/sparsemem: prepare for sub-section ranges
  mm: kill is_dev_zone() helper
  mm/hotplug: kill is_dev_zone() usage in __remove_pages()
  mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()
  mm/hotplug: prepare shrink_{zone,pgdat}_span for sub-section removal
  mm/sparsemem: add helpers track active portions of a section at boot
  mm/sparsemem: introduce a SECTION_IS_EARLY flag
  mm/sparsemem: introduce struct mem_section_usage
  drivers/base/memory.c: get rid of find_memory_block_hinted()
  mm/memory_hotplug: move and simplify walk_memory_blocks()
  mm/memory_hotplug: rename walk_memory_range() and pass start+size instead of pfns
  mm: make register_mem_sect_under_node() static
  ...
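For context on that last item, here is a minimal, hedged sketch of the before/after sysctl pattern. SYSCTL_ZERO and SYSCTL_ONE are the shared globals the series adds in include/linux/sysctl.h; the example_knob variable and the ctl_table entries are hypothetical, purely for illustration.

#include <linux/sysctl.h>

/* Before: each file declared private bounds for proc_dointvec_minmax(). */
static int zero;
static int one = 1;
static int example_knob;	/* hypothetical tunable */

static struct ctl_table example_table_old[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{ }
};

/* After: point .extra1/.extra2 at the shared constants instead. */
static struct ctl_table example_table_new[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

The private "static int zero;"/"static int one;" copies disappear; the range-check behaviour of the handler is unchanged.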
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c  275
1 file changed, 105 insertions, 170 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4ebe696138e8..2a9bbddb0e55 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -166,9 +166,10 @@ void put_page_bootmem(struct page *page)
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
- unsigned long *usemap, mapsize, section_nr, i;
+ unsigned long mapsize, section_nr, i;
struct mem_section *ms;
struct page *page, *memmap;
+ struct mem_section_usage *usage;
section_nr = pfn_to_section_nr(start_pfn);
ms = __nr_to_section(section_nr);
@@ -188,10 +189,10 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
for (i = 0; i < mapsize; i++, page++)
get_page_bootmem(section_nr, page, SECTION_INFO);
- usemap = ms->pageblock_flags;
- page = virt_to_page(usemap);
+ usage = ms->usage;
+ page = virt_to_page(usage);
- mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+ mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
for (i = 0; i < mapsize; i++, page++)
get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
@@ -200,9 +201,10 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
- unsigned long *usemap, mapsize, section_nr, i;
+ unsigned long mapsize, section_nr, i;
struct mem_section *ms;
struct page *page, *memmap;
+ struct mem_section_usage *usage;
section_nr = pfn_to_section_nr(start_pfn);
ms = __nr_to_section(section_nr);
@@ -211,10 +213,10 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
- usemap = ms->pageblock_flags;
- page = virt_to_page(usemap);
+ usage = ms->usage;
+ page = virt_to_page(usage);
- mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+ mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
for (i = 0; i < mapsize; i++, page++)
get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
@@ -250,22 +252,31 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
-static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
- struct vmem_altmap *altmap, bool want_memblock)
+static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
+ const char *reason)
{
- int ret;
-
- if (pfn_valid(phys_start_pfn))
- return -EEXIST;
-
- ret = sparse_add_one_section(nid, phys_start_pfn, altmap);
- if (ret < 0)
- return ret;
-
- if (!want_memblock)
- return 0;
-
- return hotplug_memory_register(nid, __pfn_to_section(phys_start_pfn));
+ /*
+ * Disallow all operations smaller than a sub-section and only
+ * allow operations smaller than a section for
+ * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
+ * enforces a larger memory_block_size_bytes() granularity for
+ * memory that will be marked online, so this check should only
+ * fire for direct arch_{add,remove}_memory() users outside of
+ * add_memory_resource().
+ */
+ unsigned long min_align;
+
+ if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
+ min_align = PAGES_PER_SUBSECTION;
+ else
+ min_align = PAGES_PER_SECTION;
+ if (!IS_ALIGNED(pfn, min_align)
+ || !IS_ALIGNED(nr_pages, min_align)) {
+ WARN(1, "Misaligned __%s_pages start: %#lx end: #%lx\n",
+ reason, pfn, pfn + nr_pages - 1);
+ return -EINVAL;
+ }
+ return 0;
}
/*
@@ -274,62 +285,54 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
* call this function after deciding the zone to which to
* add the new pages.
*/
-int __ref __add_pages(int nid, unsigned long phys_start_pfn,
- unsigned long nr_pages, struct mhp_restrictions *restrictions)
+int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
+ struct mhp_restrictions *restrictions)
{
- unsigned long i;
- int err = 0;
- int start_sec, end_sec;
+ int err;
+ unsigned long nr, start_sec, end_sec;
struct vmem_altmap *altmap = restrictions->altmap;
- /* during initialize mem_map, align hot-added range to section */
- start_sec = pfn_to_section_nr(phys_start_pfn);
- end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
-
if (altmap) {
/*
* Validate altmap is within bounds of the total request
*/
- if (altmap->base_pfn != phys_start_pfn
+ if (altmap->base_pfn != pfn
|| vmem_altmap_offset(altmap) > nr_pages) {
pr_warn_once("memory add fail, invalid altmap\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
altmap->alloc = 0;
}
- for (i = start_sec; i <= end_sec; i++) {
- err = __add_section(nid, section_nr_to_pfn(i), altmap,
- restrictions->flags & MHP_MEMBLOCK_API);
+ err = check_pfn_span(pfn, nr_pages, "add");
+ if (err)
+ return err;
- /*
- * EEXIST is finally dealt with by ioresource collision
- * check. see add_memory() => register_memory_resource()
- * Warning will be printed if there is collision.
- */
- if (err && (err != -EEXIST))
+ start_sec = pfn_to_section_nr(pfn);
+ end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+ for (nr = start_sec; nr <= end_sec; nr++) {
+ unsigned long pfns;
+
+ pfns = min(nr_pages, PAGES_PER_SECTION
+ - (pfn & ~PAGE_SECTION_MASK));
+ err = sparse_add_section(nid, pfn, pfns, altmap);
+ if (err)
break;
- err = 0;
+ pfn += pfns;
+ nr_pages -= pfns;
cond_resched();
}
vmemmap_populate_print_last();
-out:
return err;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
unsigned long start_pfn,
unsigned long end_pfn)
{
- struct mem_section *ms;
-
- for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
- ms = __pfn_to_section(start_pfn);
-
- if (unlikely(!valid_section(ms)))
+ for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
+ if (unlikely(!pfn_valid(start_pfn)))
continue;
if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -349,15 +352,12 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
unsigned long start_pfn,
unsigned long end_pfn)
{
- struct mem_section *ms;
unsigned long pfn;
/* pfn is the end pfn of a memory section. */
pfn = end_pfn - 1;
- for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
- ms = __pfn_to_section(pfn);
-
- if (unlikely(!valid_section(ms)))
+ for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
+ if (unlikely(!pfn_valid(pfn)))
continue;
if (unlikely(pfn_to_nid(pfn) != nid))
@@ -379,7 +379,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
unsigned long zone_end_pfn = z;
unsigned long pfn;
- struct mem_section *ms;
int nid = zone_to_nid(zone);
zone_span_writelock(zone);
@@ -416,17 +415,15 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
* it check the zone has only hole or not.
*/
pfn = zone_start_pfn;
- for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
- ms = __pfn_to_section(pfn);
-
- if (unlikely(!valid_section(ms)))
+ for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
+ if (unlikely(!pfn_valid(pfn)))
continue;
if (page_zone(pfn_to_page(pfn)) != zone)
continue;
- /* If the section is current section, it continues the loop */
- if (start_pfn == pfn)
+ /* Skip range to be removed */
+ if (pfn >= start_pfn && pfn < end_pfn)
continue;
/* If we find valid section, we have nothing to do */
@@ -447,7 +444,6 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
unsigned long pgdat_end_pfn = p;
unsigned long pfn;
- struct mem_section *ms;
int nid = pgdat->node_id;
if (pgdat_start_pfn == start_pfn) {
@@ -484,17 +480,15 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
* has only hole or not.
*/
pfn = pgdat_start_pfn;
- for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
- ms = __pfn_to_section(pfn);
-
- if (unlikely(!valid_section(ms)))
+ for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
+ if (unlikely(!pfn_valid(pfn)))
continue;
if (pfn_to_nid(pfn) != nid)
continue;
- /* If the section is current section, it continues the loop */
- if (start_pfn == pfn)
+ /* Skip range to be removed */
+ if (pfn >= start_pfn && pfn < end_pfn)
continue;
/* If we find valid section, we have nothing to do */
@@ -506,10 +500,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
pgdat->node_spanned_pages = 0;
}
-static void __remove_zone(struct zone *zone, unsigned long start_pfn)
+static void __remove_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages)
{
struct pglist_data *pgdat = zone->zone_pgdat;
- int nr_pages = PAGES_PER_SECTION;
unsigned long flags;
pgdat_resize_lock(zone->zone_pgdat, &flags);
@@ -518,29 +512,23 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
pgdat_resize_unlock(zone->zone_pgdat, &flags);
}
-static void __remove_section(struct zone *zone, struct mem_section *ms,
- unsigned long map_offset,
- struct vmem_altmap *altmap)
+static void __remove_section(struct zone *zone, unsigned long pfn,
+ unsigned long nr_pages, unsigned long map_offset,
+ struct vmem_altmap *altmap)
{
- unsigned long start_pfn;
- int scn_nr;
+ struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
if (WARN_ON_ONCE(!valid_section(ms)))
return;
- unregister_memory_section(ms);
-
- scn_nr = __section_nr(ms);
- start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
- __remove_zone(zone, start_pfn);
-
- sparse_remove_one_section(zone, ms, map_offset, altmap);
+ __remove_zone(zone, pfn, nr_pages);
+ sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}
/**
* __remove_pages() - remove sections of pages from a zone
* @zone: zone from which pages need to be removed
- * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @pfn: starting pageframe (must be aligned to start of a section)
* @nr_pages: number of pages to remove (must be multiple of section size)
* @altmap: alternative device page map or %NULL if default memmap is used
*
@@ -549,38 +537,35 @@ static void __remove_section(struct zone *zone, struct mem_section *ms,
* sure that pages are marked reserved and zones are adjust properly by
* calling offline_pages().
*/
-void __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+void __remove_pages(struct zone *zone, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap)
{
- unsigned long i;
unsigned long map_offset = 0;
- int sections_to_remove;
+ unsigned long nr, start_sec, end_sec;
- /* In the ZONE_DEVICE case device driver owns the memory region */
- if (is_dev_zone(zone))
- map_offset = vmem_altmap_offset(altmap);
+ map_offset = vmem_altmap_offset(altmap);
clear_zone_contiguous(zone);
- /*
- * We can only remove entire sections
- */
- BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
- BUG_ON(nr_pages % PAGES_PER_SECTION);
+ if (check_pfn_span(pfn, nr_pages, "remove"))
+ return;
- sections_to_remove = nr_pages / PAGES_PER_SECTION;
- for (i = 0; i < sections_to_remove; i++) {
- unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+ start_sec = pfn_to_section_nr(pfn);
+ end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+ for (nr = start_sec; nr <= end_sec; nr++) {
+ unsigned long pfns;
cond_resched();
- __remove_section(zone, __pfn_to_section(pfn), map_offset,
- altmap);
+ pfns = min(nr_pages, PAGES_PER_SECTION
+ - (pfn & ~PAGE_SECTION_MASK));
+ __remove_section(zone, pfn, pfns, map_offset, altmap);
+ pfn += pfns;
+ nr_pages -= pfns;
map_offset = 0;
}
set_zone_contiguous(zone);
}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
int set_online_page_callback(online_page_callback_t callback)
{
@@ -1049,16 +1034,11 @@ int try_online_node(int nid)
static int check_hotplug_memory_range(u64 start, u64 size)
{
- unsigned long block_sz = memory_block_size_bytes();
- u64 block_nr_pages = block_sz >> PAGE_SHIFT;
- u64 nr_pages = size >> PAGE_SHIFT;
- u64 start_pfn = PFN_DOWN(start);
-
/* memory range must be block size aligned */
- if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
- !IS_ALIGNED(nr_pages, block_nr_pages)) {
+ if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())) {
pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
- block_sz, start, size);
+ memory_block_size_bytes(), start, size);
return -EINVAL;
}
@@ -1078,9 +1058,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
*/
int __ref add_memory_resource(int nid, struct resource *res)
{
- struct mhp_restrictions restrictions = {
- .flags = MHP_MEMBLOCK_API,
- };
+ struct mhp_restrictions restrictions = {};
u64 start, size;
bool new_node = false;
int ret;
@@ -1112,6 +1090,13 @@ int __ref add_memory_resource(int nid, struct resource *res)
if (ret < 0)
goto error;
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(start, size);
+ if (ret) {
+ arch_remove_memory(nid, start, size, NULL);
+ goto error;
+ }
+
if (new_node) {
/* If sysfs file of new node can't be created, cpu on the node
* can't be hot-added. There is no rollback way now.
@@ -1135,8 +1120,7 @@ int __ref add_memory_resource(int nid, struct resource *res)
/* online pages if requested */
if (memhp_auto_online)
- walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
- NULL, online_memory_block);
+ walk_memory_blocks(start, size, NULL, online_memory_block);
return ret;
error:
@@ -1671,58 +1655,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
return __offline_pages(start_pfn, start_pfn + nr_pages);
}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
-/**
- * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
- * @start_pfn: start pfn of the memory range
- * @end_pfn: end pfn of the memory range
- * @arg: argument passed to func
- * @func: callback for each memory section walked
- *
- * This function walks through all present mem sections in range
- * [start_pfn, end_pfn) and call func on each mem section.
- *
- * Returns the return value of func.
- */
-int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
- void *arg, int (*func)(struct memory_block *, void *))
-{
- struct memory_block *mem = NULL;
- struct mem_section *section;
- unsigned long pfn, section_nr;
- int ret;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
- section_nr = pfn_to_section_nr(pfn);
- if (!present_section_nr(section_nr))
- continue;
-
- section = __nr_to_section(section_nr);
- /* same memblock? */
- if (mem)
- if ((section_nr >= mem->start_section_nr) &&
- (section_nr <= mem->end_section_nr))
- continue;
-
- mem = find_memory_block_hinted(section, mem);
- if (!mem)
- continue;
-
- ret = func(mem, arg);
- if (ret) {
- kobject_put(&mem->dev.kobj);
- return ret;
- }
- }
-
- if (mem)
- kobject_put(&mem->dev.kobj);
-
- return 0;
-}
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
int ret = !is_memblock_offlined(mem);
@@ -1833,8 +1766,7 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
* whether all memory blocks in question are offline and return error
* if this is not the case.
*/
- rc = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
- check_memblock_offlined_cb);
+ rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb);
if (rc)
goto done;
@@ -1843,6 +1775,9 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
memblock_free(start, size);
memblock_remove(start, size);
+ /* remove memory block devices before removing memory */
+ remove_memory_block_devices(start, size);
+
arch_remove_memory(nid, start, size, NULL);
__release_memory_resource(start, size);
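
A side note on the sub-section helpers introduced above: the alignment rule enforced by check_pfn_span() and the per-section chunking loop shared by __add_pages() and __remove_pages() boil down to simple pfn arithmetic. The standalone userspace sketch below assumes typical x86-64 constants (PAGES_PER_SECTION = 32768 and PAGES_PER_SUBSECTION = 512, i.e. 128 MiB sections and 2 MiB sub-sections of 4 KiB pages); the request in main() is hypothetical.

#include <stdio.h>
#include <stdbool.h>

#define PAGES_PER_SECTION	32768UL	/* assumption: 128 MiB section */
#define PAGES_PER_SUBSECTION	512UL	/* assumption: 2 MiB sub-section */
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

/* Mirrors check_pfn_span() with CONFIG_SPARSEMEM_VMEMMAP: start pfn and
 * length must both be sub-section aligned. */
static bool span_is_valid(unsigned long pfn, unsigned long nr_pages)
{
	return !(pfn % PAGES_PER_SUBSECTION) &&
	       !(nr_pages % PAGES_PER_SUBSECTION);
}

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical request: 192 MiB starting 64 MiB into a section. */
	unsigned long pfn = 16384, nr_pages = 49152;

	if (!span_is_valid(pfn, nr_pages))
		return 1;

	/* Same chunking as the loops in __add_pages()/__remove_pages():
	 * each iteration covers at most the remainder of the current
	 * section, which is the span handed to sparse_add_section() or
	 * __remove_section() in the kernel. */
	while (nr_pages) {
		unsigned long pfns = min_ul(nr_pages,
				PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));

		printf("chunk: pfn %lu..%lu (%lu pages)\n",
		       pfn, pfn + pfns - 1, pfns);
		pfn += pfns;
		nr_pages -= pfns;
	}
	return 0;
}

With these inputs the walk prints two chunks: the 16384-page tail of the first section, then one full 32768-page section, matching how the kernel loops advance pfn and nr_pages across section boundaries.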