Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h  |  51
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6dc300bac2a1..cf443f064a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -335,6 +335,37 @@ struct kmem_cache_args {
* %NULL means no constructor.
*/
void (*ctor)(void *);
+ /**
+ * @sheaf_capacity: Enable sheaves of given capacity for the cache.
+ *
+ * With a non-zero value, allocations from the cache go through caching
+ * arrays called sheaves. Each cpu has a main sheaf that's always
+ * present, and a spare sheaf that may not be present. When both become
+ * empty, there's an attempt to replace an empty sheaf with a full sheaf
+ * from the per-node barn.
+ *
+ * When no full sheaf is available, and gfp flags allow blocking, a
+ * sheaf is allocated and filled from slab(s) using bulk allocation.
+ * Otherwise the allocation falls back to the normal operation of
+ * allocating a single object from a slab.
+ *
+ * Analogously, when freeing and both percpu sheaves are full, the barn
+ * may replace one of them with an empty sheaf, unless the barn is over
+ * capacity. In that case a sheaf is bulk freed to slab pages.
+ *
+ * The sheaves do not enforce NUMA placement of objects, so allocations
+ * via kmem_cache_alloc_node() with a node specified other than
+ * NUMA_NO_NODE will bypass them.
+ *
+ * Bulk allocation and free operations also try to use the cpu sheaves
+ * and barn, but fall back to using slab pages directly when necessary.
+ *
+ * When slub_debug is enabled for the cache, the sheaf_capacity argument
+ * is ignored.
+ *
+ * %0 means no sheaves will be created.
+ */
+ unsigned int sheaf_capacity;
};
struct kmem_cache *__kmem_cache_create_args(const char *name,
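As an illustrative sketch only (not part of this patch), a cache using sheaves might be created along the following lines, assuming the kmem_cache_create() variant that accepts a struct kmem_cache_args; the object type, cache name and capacity value are made up for the example.

struct my_obj {				/* hypothetical object type */
	unsigned long id;
	void *data;
};

static struct kmem_cache *my_obj_cache;

static int __init my_obj_cache_init(void)
{
	struct kmem_cache_args args = {
		/* each percpu sheaf caches up to 32 objects */
		.sheaf_capacity = 32,
	};

	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					 &args, 0);
	return my_obj_cache ? 0 : -ENOMEM;
}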
@@ -474,6 +505,7 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size
#define krealloc(...) krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
void kfree(const void *objp);
+void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
@@ -802,6 +834,22 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
+ struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_from_sheaf(...) \
+ alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
+
+unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
+
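For illustration, and not part of the patch itself: the prefilled sheaf API could be used to reserve capacity for a batch of allocations up front, so that later kmem_cache_alloc_from_sheaf() calls can be served from the reserved sheaf. The cache pointer, the reserve of 10 objects and the NULL-on-failure check are assumptions of this sketch.

static int example_prefill(struct kmem_cache *cache)
{
	struct slab_sheaf *sheaf;
	void *obj;

	/* reserve capacity for up to 10 objects */
	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 10);
	if (!sheaf)			/* assumed failure convention */
		return -ENOMEM;

	/* served from the prefilled sheaf */
	obj = kmem_cache_alloc_from_sheaf(cache, GFP_KERNEL, sheaf);

	/*
	 * ... use obj, possibly allocate more; kmem_cache_sheaf_size()
	 * reports how many objects remain in the sheaf ...
	 */

	/* hand any unused capacity back to the cache */
	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);
	return 0;
}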
/*
* These macros allow declaring a kmem_buckets * parameter alongside size, which
* can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
@@ -914,6 +962,9 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
}
#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
+void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
+#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
+
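Purely as an illustration of the new _nolock pair; the struct and the use of __GFP_ZERO and NUMA_NO_NODE are assumptions of this sketch, not something the declarations above prescribe.

struct my_record {			/* hypothetical */
	u64 key;
	u64 value;
};

static struct my_record *record_alloc(void)
{
	/* may fail; callers must tolerate a NULL return */
	return kmalloc_nolock(sizeof(struct my_record), __GFP_ZERO,
			      NUMA_NO_NODE);
}

static void record_free(struct my_record *rec)
{
	kfree_nolock(rec);
}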
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))