Diffstat (limited to 'include')
-rw-r--r--  include/linux/gfp.h                   2
-rw-r--r--  include/linux/kasan.h                13
-rw-r--r--  include/linux/local_lock.h            2
-rw-r--r--  include/linux/local_lock_internal.h  16
-rw-r--r--  include/linux/maple_tree.h            6
-rw-r--r--  include/linux/memcontrol.h           12
-rw-r--r--  include/linux/rtmutex.h              10
-rw-r--r--  include/linux/slab.h                 51
8 files changed, 100 insertions, 12 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5ebf26fcdcfa..0ceb4e09306c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -354,7 +354,7 @@ static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
-struct page *alloc_pages_nolock_noprof(int nid, unsigned int order);
+struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))
extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
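The new gfp_flags argument lets callers of the nolock page allocator request things like memcg accounting. A minimal, hypothetical caller sketch; which flags alloc_pages_nolock() actually honors is decided in mm/page_alloc.c, not in this header:

#include <linux/gfp.h>

/*
 * Hypothetical caller: try to grab one page from any context without
 * taking zone locks. Passing __GFP_ACCOUNT is an assumption about the
 * accepted flags; NULL simply means "retry later or fall back".
 */
static struct page *grab_page_any_context(int nid)
{
	return alloc_pages_nolock(__GFP_ACCOUNT, nid, 0);
}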
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index fe5ce9215821..b4973e7c2940 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -200,7 +200,7 @@ static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
}
bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
- bool still_accessible);
+ bool still_accessible, bool no_quarantine);
/**
* kasan_slab_free - Poison, initialize, and quarantine a slab object.
* @object: Object to be freed.
@@ -226,11 +226,13 @@ bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
* @Return true if KASAN took ownership of the object; false otherwise.
*/
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
- void *object, bool init,
- bool still_accessible)
+ void *object, bool init,
+ bool still_accessible,
+ bool no_quarantine)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, init, still_accessible);
+ return __kasan_slab_free(s, object, init, still_accessible,
+ no_quarantine);
return false;
}
@@ -427,7 +429,8 @@ static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- bool init, bool still_accessible)
+ bool init, bool still_accessible,
+ bool no_quarantine)
{
return false;
}
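The extra no_quarantine argument lets a caller ask KASAN to poison the object but skip the quarantine. A hedged sketch of the kind of wrapper a lock-free free path might use; the real call site lives in mm/slub.c and is not part of this diff:

/*
 * Sketch only: the quarantine machinery takes locks of its own, so a
 * free path that must not sleep or spin passes no_quarantine=true.
 */
static bool kasan_free_nolock(struct kmem_cache *s, void *object, bool init)
{
	return kasan_slab_free(s, object, init, /* still_accessible */ false,
			       /* no_quarantine */ true);
}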
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 2ba846419524..0d91d060e3e9 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -66,6 +66,8 @@
*/
#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
+
/**
* local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
* interrupts if acquired
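local_lock_is_locked() reports whether this CPU's instance of a local_trylock_t is currently held, which lets a caller detect re-entrancy (for example from NMI) before attempting a trylock. A minimal sketch with a hypothetical per-CPU lock; per the comments in local_lock_internal.h, preemption or migration must already be disabled:

#include <linux/local_lock.h>

/*
 * Hypothetical helper: report whether the fast path protected by a
 * per-CPU trylock is usable, or whether we re-entered it (e.g. from
 * NMI) and must defer. Caller has preemption or migration disabled.
 */
static bool fast_path_usable(local_trylock_t __percpu *lock)
{
	return !local_lock_is_locked(lock);
}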
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index d80b5306a2c0..a4dc479157b5 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -17,7 +17,10 @@ typedef struct {
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
typedef struct {
- local_lock_t llock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
u8 acquired;
} local_trylock_t;
@@ -31,7 +34,7 @@ typedef struct {
.owner = NULL,
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
- .llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },
+ LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l)
{
@@ -81,7 +84,7 @@ do { \
local_lock_debug_init(lock); \
} while (0)
-#define __local_trylock_init(lock) __local_lock_init(lock.llock)
+#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
#define __spinlock_nested_bh_init(lock) \
do { \
@@ -162,6 +165,9 @@ do { \
!!tl; \
})
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
#define __local_lock_release(lock) \
do { \
local_trylock_t *tl; \
@@ -282,4 +288,8 @@ do { \
__local_trylock(lock); \
})
+/* migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(__lock) \
+ (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
#endif /* CONFIG_PREEMPT_RT */
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index bafe143b1f78..51a64ff23b88 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -442,7 +442,9 @@ struct ma_state {
struct maple_enode *node; /* The node containing this entry */
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
- struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
+ struct maple_node *alloc; /* A single allocated node for fast path writes */
+ unsigned long node_request; /* The number of nodes to allocate for this operation */
enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
@@ -490,7 +492,9 @@ struct ma_wr_state {
.status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
+ .sheaf = NULL, \
.alloc = NULL, \
+ .node_request = 0, \
.mas_flags = 0, \
.store_type = wr_invalid, \
}
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index fb27e3d2fdac..9924f157aae0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -341,17 +341,25 @@ enum page_memcg_data_flags {
__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};
+#define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS
#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
#else /* CONFIG_MEMCG */
+#define __OBJEXTS_ALLOC_FAIL (1UL << 0)
#define __FIRST_OBJEXT_FLAG (1UL << 0)
#endif /* CONFIG_MEMCG */
enum objext_flags {
- /* slabobj_ext vector failed to allocate */
- OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
+ /*
+ * Use bit 0 with zero other bits to signal that slabobj_ext vector
+ * failed to allocate. The same bit 0 with valid upper bits means
+ * MEMCG_DATA_OBJEXTS.
+ */
+ OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
+ /* slabobj_ext vector allocated with kmalloc_nolock() */
+ OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
/* the next bit after the last actual flag */
__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};
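A hedged illustration of the encoding described in the comment above: a real slabobj_ext vector is a tagged pointer and therefore has non-zero upper bits, so the bare OBJEXTS_ALLOC_FAIL value is unambiguous (sketch, not the in-tree helper):

/*
 * Sketch: obj_exts holds either a pointer tagged with MEMCG_DATA_OBJEXTS
 * or the bare OBJEXTS_ALLOC_FAIL value recorded on allocation failure.
 */
static inline bool obj_exts_alloc_failed(unsigned long obj_exts)
{
	return obj_exts == OBJEXTS_ALLOC_FAIL;
}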
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa9f1021541e..ede4c6bf6f22 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -44,6 +44,16 @@ static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
return READ_ONCE(lock->owner) != NULL;
}
+#ifdef CONFIG_RT_MUTEXES
+#define RT_MUTEX_HAS_WAITERS 1UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
+{
+ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+ return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
+}
+#endif
extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
/**
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d5a8ab98035c..561597dd2164 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -335,6 +335,37 @@ struct kmem_cache_args {
* %NULL means no constructor.
*/
void (*ctor)(void *);
+ /**
+ * @sheaf_capacity: Enable sheaves of given capacity for the cache.
+ *
+ * With a non-zero value, allocations from the cache go through caching
+ * arrays called sheaves. Each cpu has a main sheaf that's always
+ * present, and a spare sheaf that may not be present. When both become
+ * empty, there's an attempt to replace an empty sheaf with a full sheaf
+ * from the per-node barn.
+ *
+ * When no full sheaf is available, and gfp flags allow blocking, a
+ * sheaf is allocated and filled from slab(s) using bulk allocation.
+ * Otherwise the allocation falls back to the normal operation
+ * allocating a single object from a slab.
+ *
+ * Analogously, when freeing and both percpu sheaves are full, the barn
+ * may replace one of them with an empty sheaf, unless the barn is over
+ * capacity. In that case a sheaf is bulk freed to slab pages.
+ *
+ * The sheaves do not enforce NUMA placement of objects, so allocations
+ * via kmem_cache_alloc_node() with a node specified other than
+ * NUMA_NO_NODE will bypass them.
+ *
+ * Bulk allocation and free operations also try to use the cpu sheaves
+ * and barn, but fall back to using slab pages directly.
+ *
+ * When slub_debug is enabled for the cache, the sheaf_capacity argument
+ * is ignored.
+ *
+ * %0 means no sheaves will be created.
+ */
+ unsigned int sheaf_capacity;
};
struct kmem_cache *__kmem_cache_create_args(const char *name,
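For illustration, a hedged sketch of creating a sheaf-enabled cache through the kmem_cache_args interface declared above; the object type, cache name, and the capacity of 32 are made-up values:

#include <linux/init.h>
#include <linux/slab.h>

struct my_node {		/* hypothetical object type */
	unsigned long key;
	void *value;
};

static struct kmem_cache *my_node_cache;

static int __init my_node_cache_init(void)
{
	struct kmem_cache_args args = {
		.sheaf_capacity = 32,	/* per-CPU sheaves of 32 objects */
	};

	my_node_cache = __kmem_cache_create_args("my_node",
						 sizeof(struct my_node),
						 &args, 0);
	return my_node_cache ? 0 : -ENOMEM;
}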
@@ -470,6 +501,7 @@ void * __must_check krealloc_noprof(const void *objp, size_t new_size,
#define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__))
void kfree(const void *objp);
+void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
@@ -798,6 +830,22 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
+ struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_from_sheaf(...) \
+ alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
+
+unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
+
/*
* These macros allow declaring a kmem_buckets * parameter alongside size, which
* can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
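A hedged usage sketch of the prefill interface above: reserve a sheaf while blocking is still allowed, allocate from it later where failure is not an option, then hand it back. The cache, the size of 16, and the return-value conventions are assumptions for illustration:

#include <linux/slab.h>

static int reserve_and_use(struct kmem_cache *cache)
{
	struct slab_sheaf *sheaf;
	void *obj;

	/* May block and allocate; assumed to return NULL on failure. */
	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 16);
	if (!sheaf)
		return -ENOMEM;

	/* Expected to succeed while the sheaf still holds objects. */
	obj = kmem_cache_alloc_from_sheaf(cache, GFP_KERNEL, sheaf);
	if (obj) {
		/* ... initialize and publish obj ... */
	}

	/*
	 * Return whatever was not consumed; kmem_cache_sheaf_size()
	 * reports how many objects are left in the sheaf.
	 */
	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);
	return 0;
}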
@@ -910,6 +958,9 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
}
#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
+void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
+#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
+
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
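Finally, a hedged sketch of the any-context allocation pair this series adds; the exact gfp flags kmalloc_nolock() accepts (the sketch assumes __GFP_ZERO is one of them) are enforced in mm/slub.c rather than in this header:

#include <linux/slab.h>

struct my_rec {			/* hypothetical record */
	unsigned long when;
	void *where;
};

/*
 * Allocation intended to be safe under raw spinlocks or in NMI context;
 * NULL just means "could not allocate right now".
 */
static struct my_rec *my_rec_alloc_any_context(void)
{
	return kmalloc_nolock(sizeof(struct my_rec), __GFP_ZERO, NUMA_NO_NODE);
}

static void my_rec_free_any_context(struct my_rec *rec)
{
	kfree_nolock(rec);
}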