summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/kmemleak.c | 22
-rw-r--r--  mm/slub.c     | 21
2 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1ac56ceb29b6..95ad827fcd69 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -837,13 +837,12 @@ static void delete_object_full(unsigned long ptr, unsigned int objflags)
struct kmemleak_object *object;
object = find_and_remove_object(ptr, 0, objflags);
- if (!object) {
-#ifdef DEBUG
- kmemleak_warn("Freeing unknown object at 0x%08lx\n",
- ptr);
-#endif
+ if (!object)
+ /*
+ * kmalloc_nolock() -> kfree() calls kmemleak_free()
+ * without kmemleak_alloc().
+ */
return;
- }
__delete_object(object);
}
@@ -926,13 +925,12 @@ static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
struct kmemleak_object *object;
object = __find_and_get_object(ptr, 0, objflags);
- if (!object) {
- kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
- ptr,
- (color == KMEMLEAK_GREY) ? "Grey" :
- (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
+ if (!object)
+ /*
+ * kmalloc_nolock() -> kfree_rcu() calls kmemleak_ignore()
+ * without kmemleak_alloc().
+ */
return;
- }
paint_it(object, color);
put_object(object);
}
diff --git a/mm/slub.c b/mm/slub.c
index 591e41e5acc4..3f64a6b94571 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2585,6 +2585,24 @@ struct rcu_delayed_free {
* Returns true if freeing of the object can proceed, false if its reuse
* was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
* to KFENCE.
+ *
+ * For objects allocated via kmalloc_nolock(), only a subset of alloc hooks
+ * are invoked, so some free hooks must handle asymmetric hook calls.
+ *
+ * Alloc hooks called for kmalloc_nolock():
+ * - kmsan_slab_alloc()
+ * - kasan_slab_alloc()
+ * - memcg_slab_post_alloc_hook()
+ * - alloc_tagging_slab_alloc_hook()
+ *
+ * Free hooks that must handle missing corresponding alloc hooks:
+ * - kmemleak_free_recursive()
+ * - kfence_free()
+ *
 * Free hooks that have no alloc hook counterpart, and are thus safe to call:
+ * - debug_check_no_locks_freed()
+ * - debug_check_no_obj_freed()
+ * - __kcsan_check_access()
*/
static __always_inline
bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
@@ -6394,7 +6412,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
/**
* kfree - free previously allocated memory
- * @object: pointer returned by kmalloc() or kmem_cache_alloc()
+ * @object: pointer returned by kmalloc(), kmalloc_nolock(), or kmem_cache_alloc()
*
* If @object is NULL, no operation is performed.
*/
@@ -6413,6 +6431,7 @@ void kfree(const void *object)
page = virt_to_page(object);
slab = page_slab(page);
if (!slab) {
+ /* kmalloc_nolock() doesn't support large kmalloc */
free_large_kmalloc(page, (void *)object);
return;
}