summaryrefslogtreecommitdiff
path: root/drivers/misc
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-02-12 11:32:37 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2026-02-12 11:32:37 -0800
commit4cff5c05e076d2ee4e34122aa956b84a2eaac587 (patch)
tree6717207240b3881d1b48ff7cd86b193506756e6c /drivers/misc
parent541c43310e85dbf35368b43b720c6724bc8ad8ec (diff)
parentfb4ddf2085115ed28dedc427d9491707b476bbfe (diff)
Merge tag 'mm-stable-2026-02-11-19-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton: - "powerpc/64s: do not re-activate batched TLB flush" makes arch_{enter|leave}_lazy_mmu_mode() nest properly (Alexander Gordeev) It adds a generic enter/leave layer and switches architectures to use it. Various hacks were removed in the process. - "zram: introduce compressed data writeback" implements data compression for zram writeback (Richard Chang and Sergey Senozhatsky) - "mm: folio_zero_user: clear page ranges" adds clearing of contiguous page ranges for hugepages. Large improvements during demand faulting are demonstrated (David Hildenbrand) - "memcg cleanups" tidies up some memcg code (Chen Ridong) - "mm/damon: introduce {,max_}nr_snapshots and tracepoint for damos stats" improves DAMOS stat's provided information, deterministic control, and readability (SeongJae Park) - "selftests/mm: hugetlb cgroup charging: robustness fixes" fixes a few issues in the hugetlb cgroup charging selftests (Li Wang) - "Fix va_high_addr_switch.sh test failure - again" addresses several issues in the va_high_addr_switch test (Chunyu Hu) - "mm/damon/tests/core-kunit: extend existing test scenarios" improves the KUnit test coverage for DAMON (Shu Anzai) - "mm/khugepaged: fix dirty page handling for MADV_COLLAPSE" fixes a glitch in khugepaged which was causing madvise(MADV_COLLAPSE) to transiently return -EAGAIN (Shivank Garg) - "arch, mm: consolidate hugetlb early reservation" reworks and consolidates a pile of straggly code related to reservation of hugetlb memory from bootmem and creation of CMA areas for hugetlb (Mike Rapoport) - "mm: clean up anon_vma implementation" cleans up the anon_vma implementation in various ways (Lorenzo Stoakes) - "tweaks for __alloc_pages_slowpath()" does a little streamlining of the page allocator's slowpath code (Vlastimil Babka) - "memcg: separate private and public ID namespaces" cleans up the memcg ID code and prevents the internal-only private IDs from being exposed to userspace (Shakeel Butt) - "mm: hugetlb: 
allocate frozen gigantic folio" cleans up the allocation of frozen folios and avoids some atomic refcount operations (Kefeng Wang) - "mm/damon: advance DAMOS-based LRU sorting" improves DAMOS's movement of memory between the active and inactive LRUs and adds auto-tuning of the ratio-based quotas and of monitoring intervals (SeongJae Park) - "Support page table check on PowerPC" makes CONFIG_PAGE_TABLE_CHECK_ENFORCED work on powerpc (Andrew Donnellan) - "nodemask: align nodes_and{,not} with underlying bitmap ops" makes nodes_and() and nodes_andnot() propagate the return values from the underlying bit operations, enabling some cleanup in calling code (Yury Norov) - "mm/damon: hide kdamond and kdamond_lock from API callers" cleans up some DAMON internal interfaces (SeongJae Park) - "mm/khugepaged: cleanups and scan limit fix" does some cleanup work in khugepaged and fixes a scan limit accounting issue (Shivank Garg) - "mm: balloon infrastructure cleanups" goes to town on the balloon infrastructure and its page migration function. 
Mainly cleanups, also some locking simplification (David Hildenbrand) - "mm/vmscan: add tracepoint and reason for kswapd_failures reset" adds additional tracepoints to the page reclaim code (Jiayuan Chen) - "Replace wq users and add WQ_PERCPU to alloc_workqueue() users" is part of Marco's kernel-wide migration from the legacy workqueue APIs over to the preferred unbound workqueues (Marco Crivellari) - "Various mm kselftests improvements/fixes" provides various unrelated improvements/fixes for the mm kselftests (Kevin Brodsky) - "mm: accelerate gigantic folio allocation" greatly speeds up gigantic folio allocation, mainly by avoiding unnecessary work in pfn_range_valid_contig() (Kefeng Wang) - "selftests/damon: improve leak detection and wss estimation reliability" improves the reliability of two of the DAMON selftests (SeongJae Park) - "mm/damon: cleanup kdamond, damon_call(), damos filter and DAMON_MIN_REGION" does some cleanup work in the core DAMON code (SeongJae Park) - "Docs/mm/damon: update intro, modules, maintainer profile, and misc" performs maintenance work on the DAMON documentation (SeongJae Park) - "mm: add and use vma_assert_stabilised() helper" refactors and cleans up the core VMA code. The main aim here is to be able to use the mmap write lock's lockdep state to perform various assertions regarding the locking which the VMA code requires (Lorenzo Stoakes) - "mm, swap: swap table phase II: unify swapin use" removes some old swap code (swap cache bypassing and swap synchronization) which wasn't working very well. Various other cleanups and simplifications were made. The end result is a 20% speedup in one benchmark (Kairui Song) - "enable PT_RECLAIM on more 64-bit architectures" makes PT_RECLAIM available on 64-bit alpha, loongarch, mips, parisc, and um. 
Various cleanups were performed along the way (Qi Zheng) * tag 'mm-stable-2026-02-11-19-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (325 commits) mm/memory: handle non-split locks correctly in zap_empty_pte_table() mm: move pte table reclaim code to memory.c mm: make PT_RECLAIM depends on MMU_GATHER_RCU_TABLE_FREE mm: convert __HAVE_ARCH_TLB_REMOVE_TABLE to CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE config um: mm: enable MMU_GATHER_RCU_TABLE_FREE parisc: mm: enable MMU_GATHER_RCU_TABLE_FREE mips: mm: enable MMU_GATHER_RCU_TABLE_FREE LoongArch: mm: enable MMU_GATHER_RCU_TABLE_FREE alpha: mm: enable MMU_GATHER_RCU_TABLE_FREE mm: change mm/pt_reclaim.c to use asm/tlb.h instead of asm-generic/tlb.h mm/damon/stat: remove __read_mostly from memory_idle_ms_percentiles zsmalloc: make common caches global mm: add SPDX id lines to some mm source files mm/zswap: use %pe to print error pointers mm/vmscan: use %pe to print error pointers mm/readahead: fix typo in comment mm: khugepaged: fix NR_FILE_PAGES and NR_SHMEM in collapse_file() mm: refactor vma_map_pages to use vm_insert_pages mm/damon: unify address range representation with damon_addr_range mm/cma: replace snprintf with strscpy in cma_new_area ...
Diffstat (limited to 'drivers/misc')
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/vmw_balloon.c105
2 files changed, 30 insertions, 77 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d7d41b054b98..5cc79d1517af 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -410,7 +410,7 @@ config DS1682
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
- select MEMORY_BALLOON
+ select BALLOON
help
This is VMware physical memory management driver which acts
like a "balloon" that can be inflated to reclaim physical pages
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index cc1d18b3df5c..216a16395968 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -29,7 +29,7 @@
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/balloon_compaction.h>
+#include <linux/balloon.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
@@ -354,11 +354,16 @@ struct vmballoon {
/**
* @huge_pages - list of the inflated 2MB pages.
*
- * Protected by @b_dev_info.pages_lock .
+ * Protected by @huge_pages_lock.
*/
struct list_head huge_pages;
/**
+ * @huge_pages_lock: lock for the list of inflated 2MB pages.
+ */
+ spinlock_t huge_pages_lock;
+
+ /**
* @vmci_doorbell.
*
* Protected by @conf_sem.
@@ -987,7 +992,6 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
unsigned int *n_pages,
enum vmballoon_page_size_type page_size)
{
- unsigned long flags;
struct page *page;
if (page_size == VMW_BALLOON_4K_PAGE) {
@@ -995,9 +999,9 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
} else {
/*
* Keep the huge pages in a local list which is not available
- * for the balloon compaction mechanism.
+ * for the balloon page migration.
*/
- spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+ spin_lock(&b->huge_pages_lock);
list_for_each_entry(page, pages, lru) {
vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
@@ -1006,7 +1010,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
list_splice_init(pages, &b->huge_pages);
__count_vm_events(BALLOON_INFLATE, *n_pages *
vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
- spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+ spin_unlock(&b->huge_pages_lock);
}
*n_pages = 0;
@@ -1033,7 +1037,6 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
{
struct page *page, *tmp;
unsigned int i = 0;
- unsigned long flags;
/* In the case of 4k pages, use the compaction infrastructure */
if (page_size == VMW_BALLOON_4K_PAGE) {
@@ -1043,7 +1046,7 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
}
/* 2MB pages */
- spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+ spin_lock(&b->huge_pages_lock);
list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
@@ -1054,7 +1057,7 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
__count_vm_events(BALLOON_DEFLATE,
i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
- spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+ spin_unlock(&b->huge_pages_lock);
*n_pages = i;
}
@@ -1716,7 +1719,7 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
#endif /* CONFIG_DEBUG_FS */
-#ifdef CONFIG_BALLOON_COMPACTION
+#ifdef CONFIG_BALLOON_MIGRATION
/**
* vmballoon_migratepage() - migrates a balloon page.
* @b_dev_info: balloon device information descriptor.
@@ -1724,18 +1727,17 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
* @page: a ballooned page that should be migrated.
* @mode: migration mode, ignored.
*
- * This function is really open-coded, but that is according to the interface
- * that balloon_compaction provides.
- *
* Return: zero on success, -EAGAIN when migration cannot be performed
- * momentarily, and -EBUSY if migration failed and should be retried
- * with that specific page.
+ * momentarily, -EBUSY if migration failed and should be retried
+ * with that specific page, and -ENOENT when deflating @page
+ * succeeded but inflating @newpage failed, effectively deflating
+ * the balloon.
*/
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
- unsigned long status, flags;
+ unsigned long status;
struct vmballoon *b;
int ret = 0;
@@ -1773,14 +1775,6 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
goto out_unlock;
}
- /*
- * The page is isolated, so it is safe to delete it without holding
- * @pages_lock . We keep holding @comm_lock since we will need it in a
- * second.
- */
- balloon_page_finalize(page);
- put_page(page);
-
/* Inflate */
vmballoon_add_page(b, 0, newpage);
status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
@@ -1799,60 +1793,21 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* change.
*/
atomic64_dec(&b->size);
- } else {
/*
- * Success. Take a reference for the page, and we will add it to
- * the list after acquiring the lock.
+ * Tell the core that we're deflating the old page and don't
+ * need the new page.
*/
- get_page(newpage);
- }
-
- /* Update the balloon list under the @pages_lock */
- spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
-
- /*
- * On inflation success, we already took a reference for the @newpage.
- * If we succeed just insert it to the list and update the statistics
- * under the lock.
- */
- if (status == VMW_BALLOON_SUCCESS) {
- balloon_page_insert(&b->b_dev_info, newpage);
- __count_vm_event(BALLOON_MIGRATE);
+ ret = -ENOENT;
}
-
- /*
- * We deflated successfully, so regardless to the inflation success, we
- * need to reduce the number of isolated_pages.
- */
- b->b_dev_info.isolated_pages--;
- spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
-
out_unlock:
up_read(&b->conf_sem);
return ret;
}
-
-/**
- * vmballoon_compaction_init() - initialized compaction for the balloon.
- *
- * @b: pointer to the balloon.
- *
- * If during the initialization a failure occurred, this function does not
- * perform cleanup. The caller must call vmballoon_compaction_deinit() in this
- * case.
- *
- * Return: zero on success or error code on failure.
- */
-static __init void vmballoon_compaction_init(struct vmballoon *b)
-{
- b->b_dev_info.migratepage = vmballoon_migratepage;
-}
-
-#else /* CONFIG_BALLOON_COMPACTION */
-static inline void vmballoon_compaction_init(struct vmballoon *b)
-{
-}
-#endif /* CONFIG_BALLOON_COMPACTION */
+#else /* CONFIG_BALLOON_MIGRATION */
+int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode);
+#endif /* CONFIG_BALLOON_MIGRATION */
static int __init vmballoon_init(void)
{
@@ -1871,14 +1826,12 @@ static int __init vmballoon_init(void)
if (error)
return error;
- /*
- * Initialization of compaction must be done after the call to
- * balloon_devinfo_init() .
- */
balloon_devinfo_init(&balloon.b_dev_info);
- vmballoon_compaction_init(&balloon);
+ if (IS_ENABLED(CONFIG_BALLOON_MIGRATION))
+ balloon.b_dev_info.migratepage = vmballoon_migratepage;
INIT_LIST_HEAD(&balloon.huge_pages);
+ spin_lock_init(&balloon.huge_pages_lock);
spin_lock_init(&balloon.comm_lock);
init_rwsem(&balloon.conf_sem);
balloon.vmci_doorbell = VMCI_INVALID_HANDLE;