summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnshuman Khandual <anshuman.khandual@arm.com>2026-03-09 02:57:25 +0000
committerCatalin Marinas <catalin.marinas@arm.com>2026-04-01 18:04:54 +0100
commit95a58852b0e5413b6ef4c93da60a80e89da9986a (patch)
treefc4906d2c1326a54ff2005c4fad2f507e3edbef9
parent48478b9f791376b4b89018d7afdfd06865498f65 (diff)
arm64/mm: Reject memory removal that splits a kernel leaf mapping
Linear and vmemmap mappings that get torn down during a memory hot remove operation might contain leaf level entries on any page table level. If the requested memory range's linear or vmemmap mappings fall within such leaf entries, new mappings need to be created for the remaining memory mapped on the leaf entry earlier, following standard break before make aka BBM rules. But the kernel cannot tolerate BBM and hence remapping to fine grained leaves would not be possible on systems without BBML2_NOABORT. Currently memory hot remove operation does not perform such restructuring, and so removing memory ranges that could split a kernel leaf level mapping needs to be rejected. While memory_hotplug.c does appear to permit hot removing arbitrary ranges of memory, the higher layers that drive memory_hotplug (e.g. ACPI, virtio, ...) all appear to treat memory as fixed size devices. So it is impossible to hot unplug a different amount than was previously hot plugged, and hence we should never see a rejection in practice, but adding the check makes us robust against a future change. Cc: Will Deacon <will@kernel.org> Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: https://lore.kernel.org/all/aWZYXhrT6D2M-7-N@willie-the-truck/ Reviewed-by: David Hildenbrand (Arm) <david@kernel.org> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com> Suggested-by: Ryan Roberts <ryan.roberts@arm.com> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--arch/arm64/mm/mmu.c120
1 files changed, 114 insertions, 6 deletions
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5dbf988120c8..5fb9a66f0754 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -2014,6 +2014,107 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}
+
+/*
+ * Return true if @addr falls strictly inside a present leaf (block or
+ * contiguous) entry in the kernel page tables, i.e. if unmapping from
+ * @addr would require splitting that leaf entry. Walks swapper_pg_dir
+ * from PGD down to PTE level. At each level, an @addr aligned to the
+ * leaf size possible at that level (or below) can only coincide with a
+ * leaf boundary, never split one, so the walk bails out early with
+ * false on such alignment.
+ */
+static bool addr_splits_kernel_leaf(unsigned long addr)
+{
+ pgd_t *pgdp, pgd;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+
+ /*
+ * If the given address points at the start address of
+ * a possible leaf, we certainly won't split. Otherwise,
+ * check if we would actually split a leaf by traversing
+ * the page tables further.
+ */
+ if (IS_ALIGNED(addr, PGDIR_SIZE))
+ return false;
+
+ pgdp = pgd_offset_k(addr);
+ pgd = pgdp_get(pgdp);
+ /* Nothing mapped at this level: nothing to split. */
+ if (!pgd_present(pgd))
+ return false;
+
+ if (IS_ALIGNED(addr, P4D_SIZE))
+ return false;
+
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = p4dp_get(p4dp);
+ if (!p4d_present(p4d))
+ return false;
+
+ if (IS_ALIGNED(addr, PUD_SIZE))
+ return false;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ if (!pud_present(pud))
+ return false;
+
+ /* Unaligned @addr inside a PUD block mapping: would split it. */
+ if (pud_leaf(pud))
+ return true;
+
+ /*
+ * A contiguous run of PMD leaves spans CONT_PMD_SIZE, which is
+ * larger than PMD_SIZE, so the contig check must come before
+ * the PMD_SIZE alignment check: a PMD_SIZE-aligned address can
+ * still land inside a contiguous PMD run and split it.
+ */
+ if (IS_ALIGNED(addr, CONT_PMD_SIZE))
+ return false;
+
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get(pmdp);
+ if (!pmd_present(pmd))
+ return false;
+
+ if (pmd_cont(pmd))
+ return true;
+
+ if (IS_ALIGNED(addr, PMD_SIZE))
+ return false;
+
+ if (pmd_leaf(pmd))
+ return true;
+
+ /* Same ordering rationale as CONT_PMD above, at PTE level. */
+ if (IS_ALIGNED(addr, CONT_PTE_SIZE))
+ return false;
+
+ ptep = pte_offset_kernel(pmdp, addr);
+ pte = __ptep_get(ptep);
+ if (!pte_present(pte))
+ return false;
+
+ if (pte_cont(pte))
+ return true;
+
+ /* PTE is the last level; only a sub-page address could "split". */
+ return !IS_ALIGNED(addr, PAGE_SIZE);
+}
+
+/*
+ * Return true if tearing down the linear map and vmemmap coverage of
+ * the pfn range [@pfn, @pfn + @nr_pages) removes only whole leaf
+ * entries, i.e. neither edge of either mapping lands inside a leaf.
+ * Only the two edges need checking: a leaf wholly contained in the
+ * range is unmapped outright, never split.
+ */
+static bool can_unmap_without_split(unsigned long pfn, unsigned long nr_pages)
+{
+ unsigned long phys_start, phys_end, start, end;
+
+ phys_start = PFN_PHYS(pfn);
+ phys_end = phys_start + nr_pages * PAGE_SIZE;
+
+ /* PFN range's linear map edges are leaf entry aligned */
+ start = __phys_to_virt(phys_start);
+ end = __phys_to_virt(phys_end);
+ if (addr_splits_kernel_leaf(start) || addr_splits_kernel_leaf(end)) {
+ pr_warn("[%lx %lx] splits a leaf entry in linear map\n",
+ phys_start, phys_end);
+ return false;
+ }
+
+ /* PFN range's vmemmap edges are leaf entry aligned */
+ /*
+ * pfn_to_page() arithmetic below relies on the vmemmap memory
+ * model; enforce at build time that it is selected.
+ */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP));
+ start = (unsigned long)pfn_to_page(pfn);
+ end = (unsigned long)pfn_to_page(pfn + nr_pages);
+ if (addr_splits_kernel_leaf(start) || addr_splits_kernel_leaf(end)) {
+ pr_warn("[%lx %lx] splits a leaf entry in vmemmap\n",
+ phys_start, phys_end);
+ return false;
+ }
+ return true;
+}
+
/*
* This memory hotplug notifier helps prevent boot memory from being
* inadvertently removed as it blocks pfn range offlining process in
@@ -2022,8 +2123,11 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
* In future if and when boot memory could be removed, this notifier
* should be dropped and free_hotplug_page_range() should handle any
* reserved pages allocated during boot.
+ *
+ * This also blocks any memory remove that would have caused a split
+ * in leaf entry in kernel linear or vmemmap mapping.
*/
-static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
+static int prevent_memory_remove_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct mem_section *ms;
@@ -2069,11 +2173,15 @@ static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
}
+
+ if (!can_unmap_without_split(pfn, arg->nr_pages))
+ return NOTIFY_BAD;
+
return NOTIFY_OK;
}
-static struct notifier_block prevent_bootmem_remove_nb = {
- .notifier_call = prevent_bootmem_remove_notifier,
+static struct notifier_block prevent_memory_remove_nb = {
+ .notifier_call = prevent_memory_remove_notifier,
};
/*
@@ -2123,7 +2231,7 @@ static void validate_bootmem_online(void)
}
}
-static int __init prevent_bootmem_remove_init(void)
+static int __init prevent_memory_remove_init(void)
{
int ret = 0;
@@ -2131,13 +2239,13 @@ static int __init prevent_bootmem_remove_init(void)
return ret;
validate_bootmem_online();
- ret = register_memory_notifier(&prevent_bootmem_remove_nb);
+ ret = register_memory_notifier(&prevent_memory_remove_nb);
if (ret)
pr_err("%s: Notifier registration failed %d\n", __func__, ret);
return ret;
}
-early_initcall(prevent_bootmem_remove_init);
+early_initcall(prevent_memory_remove_init);
#endif
pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr,