diff options
| -rw-r--r-- | mm/slub.c | 15 |
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 56143bfd1ae3..75e4388d507d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -821,6 +821,15 @@ static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
 	return *(unsigned int *)p;
 }
 
+/*
+ * For debugging context when we want to check if the struct slab pointer
+ * appears to be valid.
+ */
+static inline bool validate_slab_ptr(struct slab *slab)
+{
+	return PageSlab(slab_page(slab));
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_SPINLOCK(object_map_lock);
@@ -1453,7 +1462,7 @@ static int check_slab(struct kmem_cache *s, struct slab *slab)
 {
 	int maxobj;
 
-	if (!folio_test_slab(slab_folio(slab))) {
+	if (!validate_slab_ptr(slab)) {
 		slab_err(s, slab, "Not a valid slab page");
 		return 0;
 	}
@@ -1653,7 +1662,7 @@ static noinline bool alloc_debug_processing(struct kmem_cache *s,
 	return true;
 
 bad:
-	if (folio_test_slab(slab_folio(slab))) {
+	if (validate_slab_ptr(slab)) {
 		/*
 		 * If this is a slab page then lets do the best we can
 		 * to avoid issues in the future. Marking all objects
@@ -2818,7 +2827,7 @@ static void *alloc_single_from_partial(struct kmem_cache *s,
 	slab->inuse++;
 
 	if (!alloc_debug_processing(s, slab, object, orig_size)) {
-		if (folio_test_slab(slab_folio(slab)))
+		if (validate_slab_ptr(slab))
 			remove_partial(n, slab);
 		return NULL;
 	}
