author		Alexei Starovoitov <ast@kernel.org>	2025-02-27 09:24:04 -0800
committer	Alexei Starovoitov <ast@kernel.org>	2025-02-27 09:41:09 -0800
commit		93ed6fc268c4cc3f1c2b3718d2beb0aa6d04ddc4 (patch)
tree		e236a453607f51e48c9c3910f57fae94fb5bbe15 /include/linux
parent		2014c95afecee3e76ca4a56956a936e23283f05b (diff)
parent		c9eb8102e21e8c4c6fb74d1921d99e4d43713520 (diff)
Merge branch 'bpf-mm-introduce-try_alloc_pages'
Alexei Starovoitov says:

====================
The main motivation is to make alloc page and slab reentrant and
remove bpf_mem_alloc.

v8->v9:
- Squash Vlastimil's fix/feature for localtry_trylock, and update
  commit log as suggested by Sebastian.
- Drop _noprof suffix in try_alloc_pages kdoc
- rebase

v8: https://lore.kernel.org/bpf/20250213033556.9534-1-alexei.starovoitov@gmail.com/

v7->v8:
- rebase: s/free_unref_page/free_frozen_page/

v6->v7:
- Took Sebastian's patch for localtry_lock_t as-is with minor addition
  of local_trylock_acquire() for proper LOCKDEP. Kept his authorship.
- Adjusted patch 4 to use it. The rest is unchanged.

v6: https://lore.kernel.org/bpf/20250124035655.78899-1-alexei.starovoitov@gmail.com/

v5->v6:
- Addressed comments from Sebastian, Vlastimil
- New approach for local_lock_t in patch 3. Instead of unconditionally
  increasing local_lock_t size to 4 bytes, introduce local_trylock_t and
  use _Generic() tricks to manipulate the active field.
- Address stackdepot reentrance issues: alloc part in patch 1 and
  free part in patch 2.
- Inlined mem_cgroup_cancel_charge() in patch 4 since this helper is
  being removed.
- Added Acks.
- Dropped failslab, kfence, kmemleak patch.
- Improved bpf_map_alloc_pages() in patch 6 a bit to demo intended
  usage. It will be refactored further.
- Considered using __GFP_COMP in try_alloc_pages to simplify
  free_pages_nolock a bit, but then decided to make it work for all
  types of pages, since free_pages_nolock() is used by stackdepot and
  currently it's using non-compound order 2. I felt it's best to leave
  it as-is and make free_pages_nolock() support all pages.

v5: https://lore.kernel.org/all/20250115021746.34691-1-alexei.starovoitov@gmail.com/

v4->v5:
- Fixed patch 1 and 4 commit logs and comments per Michal's suggestions.
  Added Acks.
- Added patch 6 to make failslab, kfence, kmemleak compliant with
  trylock mode. It's a prerequisite for reentrant slab patches.

v4: https://lore.kernel.org/bpf/20250114021922.92609-1-alexei.starovoitov@gmail.com/

v3->v4:
Addressed feedback from Michal and Shakeel:
- GFP_TRYLOCK flag is gone. gfpflags_allow_spinning() is used instead.
- Improved comments and commit logs.

v3: https://lore.kernel.org/bpf/20241218030720.1602449-1-alexei.starovoitov@gmail.com/

v2->v3:
To address the issues spotted by Sebastian, Vlastimil, Steven:
- Made GFP_TRYLOCK internal to mm/internal.h. try_alloc_pages() and
  free_pages_nolock() are the only interfaces.
- Since spin_trylock() is not safe in RT from hard IRQ and NMI, disable
  such usage in lock_trylock and in try_alloc_pages(). In such case
  free_pages_nolock() falls back to llist right away.
- Process the trylock_free_pages llist when preemptible.
- Check for things like unaccepted memory and order <= 3 early.
- Don't call into __alloc_pages_slowpath() at all.
- Inspired by Vlastimil's struct local_tryirq_lock, adopted it in
  local_lock_t. The extra 4 bytes in !RT in local_lock_t shouldn't
  affect any of the current local_lock_t users. This is patch 3.
- Tested with bpf selftests in RT and !RT and realized how much more
  work is necessary on the bpf side to play nice with RT. The urgency
  of this work got higher. The alternative is to convert bpf bits left
  and right to bpf_mem_alloc.

v2: https://lore.kernel.org/bpf/20241210023936.46871-1-alexei.starovoitov@gmail.com/

v1->v2:
- Fixed buggy try_alloc_pages_noprof() in PREEMPT_RT. Thanks Peter.
- Optimized all paths by doing spin_trylock_irqsave() first and only
  then checking for gfp_flags & __GFP_TRYLOCK, then spin_lock_irqsave()
  if it's a regular mode. So the new gfp flag will not add performance
  overhead.
- Patches 2-5 are new. They introduce lockless and/or trylock
  free_pages_nolock() and memcg support, so it's in usable shape for
  bpf in patch 6.

v1: https://lore.kernel.org/bpf/20241116014854.55141-1-alexei.starovoitov@gmail.com/
====================

Link: https://patch.msgid.link/20250222024427.30294-1-alexei.starovoitov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
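
For orientation before the diffs below: the two user-facing interfaces this series adds are try_alloc_pages() and free_pages_nolock(), both declared in the gfp.h hunk. A minimal caller-side sketch follows; the wrapper function is hypothetical and only illustrates that allocation failure is the expected outcome whenever the allocator would otherwise have to spin or sleep (assumes <linux/gfp.h>):

/* Hypothetical caller, not part of this series. */
static void demo_opportunistic_alloc(void)
{
        struct page *page;

        /* Only ever trylocks internally; returns NULL instead of spinning. */
        page = try_alloc_pages(NUMA_NO_NODE, 0);
        if (!page)
                return;         /* callers must tolerate failure */

        /* ... use the page ... */

        /* Matching free that likewise never spins on zone->lock. */
        free_pages_nolock(page, 0);
}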
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/bpf.h                   2
-rw-r--r--   include/linux/gfp.h                  23
-rw-r--r--   include/linux/local_lock.h           70
-rw-r--r--   include/linux/local_lock_internal.h 146
-rw-r--r--   include/linux/mm_types.h              4
-rw-r--r--   include/linux/mmzone.h                3
6 files changed, 247 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f3f50e29d639..e1838a341817 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2348,7 +2348,7 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
-int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
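
The bpf.h hunk above drops the gfp_t parameter from bpf_map_alloc_pages(); the allocation policy is now decided inside the helper. A hedged sketch of the new call shape, with an invented map operation as the surrounding context:

/* Hypothetical map operation; only the call signature matters here. */
static int demo_map_populate(struct bpf_map *map)
{
        struct page *pages[4];
        int err;

        /* No gfp_t argument anymore: just map, node, count, output array. */
        err = bpf_map_alloc_pages(map, NUMA_NO_NODE, ARRAY_SIZE(pages), pages);
        if (err)
                return err;

        /* ... hand the pages to the map ... */
        return 0;
}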
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6bb1a5a7a4ae..ceb226c2e25c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,6 +39,25 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
+static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
+{
+ /*
+ * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
+ * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
+ * All GFP_* flags including GFP_NOWAIT use one or both flags.
+ * try_alloc_pages() is the only API that doesn't specify either flag.
+ *
+ * This is stronger than GFP_NOWAIT or GFP_ATOMIC because
+ * those are guaranteed to never block on a sleeping lock.
+ * Here we are enforcing that the allocation doesn't ever spin
+ * on any locks (i.e. only trylocks). There is no high level
+ * GFP_$FOO flag for this use in try_alloc_pages() as the
+ * regular page allocator doesn't fully support this
+ * allocation mode.
+ */
+ return !(gfp_flags & __GFP_RECLAIM);
+}
+
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
@@ -335,6 +354,9 @@ static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
+struct page *try_alloc_pages_noprof(int nid, unsigned int order);
+#define try_alloc_pages(...) alloc_hooks(try_alloc_pages_noprof(__VA_ARGS__))
+
extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
@@ -357,6 +379,7 @@ __meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mas
__get_free_pages((gfp_mask) | GFP_DMA, (order))
extern void __free_pages(struct page *page, unsigned int order);
+extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
#define __free_page(page) __free_pages((page), 0)
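
To illustrate the intent of gfpflags_allow_spinning() added in the hunk above: a callee can use it to decide between a regular spin_lock and a trylock-only path. The pool structure and helper below are invented for illustration and assume <linux/spinlock.h> and <linux/list.h>; they are not code from this series:

struct demo_pool {
        spinlock_t lock;
        struct list_head items;
};

/* Hypothetical allocation-path fragment. */
static struct list_head *demo_pool_pop(struct demo_pool *pool, gfp_t gfp)
{
        struct list_head *item = NULL;
        unsigned long flags;

        if (gfpflags_allow_spinning(gfp)) {
                spin_lock_irqsave(&pool->lock, flags);
        } else if (!spin_trylock_irqsave(&pool->lock, flags)) {
                /* Trylock-only mode, e.g. inside try_alloc_pages(): give up. */
                return NULL;
        }

        if (!list_empty(&pool->items)) {
                item = pool->items.next;
                list_del(item);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
        return item;
}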
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 091dc0b6bdfb..1a0bc35839e3 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -51,6 +51,76 @@
#define local_unlock_irqrestore(lock, flags) \
__local_unlock_irqrestore(lock, flags)
+/**
+ * localtry_lock_init - Runtime initialize a lock instance
+ */
+#define localtry_lock_init(lock) __localtry_lock_init(lock)
+
+/**
+ * localtry_lock - Acquire a per CPU local lock
+ * @lock: The lock variable
+ */
+#define localtry_lock(lock) __localtry_lock(lock)
+
+/**
+ * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
+ * @lock: The lock variable
+ */
+#define localtry_lock_irq(lock) __localtry_lock_irq(lock)
+
+/**
+ * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
+ * interrupts
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ */
+#define localtry_lock_irqsave(lock, flags) \
+ __localtry_lock_irqsave(lock, flags)
+
+/**
+ * localtry_trylock - Try to acquire a per CPU local lock.
+ * @lock: The lock variable
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define localtry_trylock(lock) __localtry_trylock(lock)
+
+/**
+ * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define localtry_trylock_irqsave(lock, flags) \
+ __localtry_trylock_irqsave(lock, flags)
+
+/**
+ * localtry_unlock - Release a per CPU local lock
+ * @lock: The lock variable
+ */
+#define localtry_unlock(lock) __localtry_unlock(lock)
+
+/**
+ * localtry_unlock_irq - Release a per CPU local lock and enable interrupts
+ * @lock: The lock variable
+ */
+#define localtry_unlock_irq(lock) __localtry_unlock_irq(lock)
+
+/**
+ * localtry_unlock_irqrestore - Release a per CPU local lock and restore
+ * interrupt flags
+ * @lock: The lock variable
+ * @flags: Interrupt flags to restore
+ */
+#define localtry_unlock_irqrestore(lock, flags) \
+ __localtry_unlock_irqrestore(lock, flags)
+
DEFINE_GUARD(local_lock, local_lock_t __percpu*,
local_lock(_T),
local_unlock(_T))
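
A hedged usage sketch of the localtry_lock_t API documented above, for plain process-context callers; the per-CPU structure and field names are invented (assumes <linux/local_lock.h> and <linux/percpu-defs.h>):

struct demo_pcp {
        localtry_lock_t lock;
        unsigned long count;
};

static DEFINE_PER_CPU(struct demo_pcp, demo_pcp) = {
        .lock = INIT_LOCALTRY_LOCK(lock),
};

/* Process context: the plain (non-try) variants may be used. */
static void demo_inc(void)
{
        struct demo_pcp *p;
        unsigned long flags;

        localtry_lock_irqsave(&demo_pcp.lock, flags);
        p = this_cpu_ptr(&demo_pcp);
        p->count++;
        localtry_unlock_irqrestore(&demo_pcp.lock, flags);
}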
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8dd71fbbb6d2..67bd13d142fa 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,6 +15,11 @@ typedef struct {
#endif
} local_lock_t;
+typedef struct {
+ local_lock_t llock;
+ unsigned int acquired;
+} localtry_lock_t;
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
.dep_map = { \
@@ -31,6 +36,13 @@ static inline void local_lock_acquire(local_lock_t *l)
l->owner = current;
}
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+ lock_map_acquire_try(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
static inline void local_lock_release(local_lock_t *l)
{
DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -45,11 +57,13 @@ static inline void local_lock_debug_init(local_lock_t *l)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCALTRY_LOCK(lockname) { .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
#define __local_lock_init(lock) \
do { \
@@ -118,6 +132,104 @@ do { \
#define __local_unlock_nested_bh(lock) \
local_lock_release(this_cpu_ptr(lock))
+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock) \
+do { \
+ __local_lock_init(&(lock)->llock); \
+ WRITE_ONCE((lock)->acquired, 0); \
+} while (0)
+
+#define __localtry_lock(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ preempt_disable(); \
+ lt = this_cpu_ptr(lock); \
+ local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_lock_irq(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ local_irq_disable(); \
+ lt = this_cpu_ptr(lock); \
+ local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_lock_irqsave(lock, flags) \
+ do { \
+ localtry_lock_t *lt; \
+ local_irq_save(flags); \
+ lt = this_cpu_ptr(lock); \
+ local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_trylock(lock) \
+ ({ \
+ localtry_lock_t *lt; \
+ bool _ret; \
+ \
+ preempt_disable(); \
+ lt = this_cpu_ptr(lock); \
+ if (!READ_ONCE(lt->acquired)) { \
+ WRITE_ONCE(lt->acquired, 1); \
+ local_trylock_acquire(&lt->llock); \
+ _ret = true; \
+ } else { \
+ _ret = false; \
+ preempt_enable(); \
+ } \
+ _ret; \
+ })
+
+#define __localtry_trylock_irqsave(lock, flags) \
+ ({ \
+ localtry_lock_t *lt; \
+ bool _ret; \
+ \
+ local_irq_save(flags); \
+ lt = this_cpu_ptr(lock); \
+ if (!READ_ONCE(lt->acquired)) { \
+ WRITE_ONCE(lt->acquired, 1); \
+ local_trylock_acquire(&lt->llock); \
+ _ret = true; \
+ } else { \
+ _ret = false; \
+ local_irq_restore(flags); \
+ } \
+ _ret; \
+ })
+
+#define __localtry_unlock(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
+ local_lock_release(&lt->llock); \
+ preempt_enable(); \
+ } while (0)
+
+#define __localtry_unlock_irq(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
+ local_lock_release(&lt->llock); \
+ local_irq_enable(); \
+ } while (0)
+
+#define __localtry_unlock_irqrestore(lock, flags) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
+ local_lock_release(&lt->llock); \
+ local_irq_restore(flags); \
+ } while (0)
+
#else /* !CONFIG_PREEMPT_RT */
/*
@@ -125,8 +237,10 @@ do { \
* critical section while staying preemptible.
*/
typedef spinlock_t local_lock_t;
+typedef spinlock_t localtry_lock_t;
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)
#define __local_lock_init(l) \
do { \
@@ -169,4 +283,36 @@ do { \
spin_unlock(this_cpu_ptr((lock))); \
} while (0)
+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock) __local_lock_init(lock)
+#define __localtry_lock(lock) __local_lock(lock)
+#define __localtry_lock_irq(lock) __local_lock(lock)
+#define __localtry_lock_irqsave(lock, flags) __local_lock_irqsave(lock, flags)
+#define __localtry_unlock(lock) __local_unlock(lock)
+#define __localtry_unlock_irq(lock) __local_unlock(lock)
+#define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)
+
+#define __localtry_trylock(lock) \
+ ({ \
+ int __locked; \
+ \
+ if (in_nmi() | in_hardirq()) { \
+ __locked = 0; \
+ } else { \
+ migrate_disable(); \
+ __locked = spin_trylock(this_cpu_ptr((lock))); \
+ if (!__locked) \
+ migrate_enable(); \
+ } \
+ __locked; \
+ })
+
+#define __localtry_trylock_irqsave(lock, flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __localtry_trylock(lock); \
+ })
+
#endif /* CONFIG_PREEMPT_RT */
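
A complementary sketch for the trylock side. On PREEMPT_RT (the #else branch above) the trylock maps to spin_trylock() on a per-CPU spinlock and is forced to fail from NMI and hardirq; on !PREEMPT_RT it fails when the lock is already held on this CPU. Either way the caller must treat failure as a normal outcome. The names below are invented:

static DEFINE_PER_CPU(localtry_lock_t, demo_lock) = INIT_LOCALTRY_LOCK(demo_lock);
static DEFINE_PER_CPU(unsigned long, demo_events);
static atomic_long_t demo_dropped = ATOMIC_LONG_INIT(0);

/* Callable from any context, including NMI; never spins or sleeps. */
static void demo_count_event(void)
{
        unsigned long flags;

        if (localtry_trylock_irqsave(&demo_lock, flags)) {
                this_cpu_inc(demo_events);
                localtry_unlock_irqrestore(&demo_lock, flags);
        } else {
                /* Lock busy on this CPU, or NMI/hardirq on PREEMPT_RT. */
                atomic_long_inc(&demo_dropped);
        }
}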
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6b27db7f9496..483aa90242cd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -99,6 +99,10 @@ struct page {
/* Or, free page */
struct list_head buddy_list;
struct list_head pcp_list;
+ struct {
+ struct llist_node pcp_llist;
+ unsigned int order;
+ };
};
/* See page-flags.h for PAGE_MAPPING_FLAGS */
struct address_space *mapping;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9540b41894da..e16939553930 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,6 +972,9 @@ struct zone {
/* Primarily protects free_area */
spinlock_t lock;
+ /* Pages to be freed when next trylock succeeds */
+ struct llist_head trylock_free_pages;
+
/* Write-intensive fields used by compaction and vmstats. */
CACHELINE_PADDING(_pad2_);
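
Tying the last two hunks together: the new pcp_llist/order fields in struct page and the per-zone trylock_free_pages list exist so that free_pages_nolock() can queue a page locklessly when it cannot take zone->lock, to be freed later once a lock holder drains the list. A heavily simplified sketch of that mechanism; this is not the actual mm/page_alloc.c code and the demo_* names are invented:

/* Defer: record the order in the page itself and queue it locklessly. */
static void demo_defer_free(struct zone *zone, struct page *page,
                            unsigned int order)
{
        page->order = order;
        llist_add(&page->pcp_llist, &zone->trylock_free_pages);
}

/* Drain: called later from a context that is allowed to spin on zone->lock. */
static void demo_drain_deferred(struct zone *zone)
{
        struct llist_node *head = llist_del_all(&zone->trylock_free_pages);
        struct page *page, *next;

        llist_for_each_entry_safe(page, next, head, pcp_llist)
                __free_pages(page, page->order);
}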