author		Peter Zijlstra <peterz@infradead.org>	2014-01-10 13:23:49 +0100
committer	Pekka Enberg <penberg@kernel.org>	2014-01-13 21:34:39 +0200
commit		c65c1877bd6826ce0d9713d76e30a7bed8e49f38
tree		27b07e870c75ac9f393017614bd6a93d0cca9415
parent		8afb1474db4701d1ab80cd8251137a3260e6913e
slub: use lockdep_assert_held
Instead of using comments in an attempt at getting the locking right, use
proper assertions that actively warn you if you got it wrong.

Also add extra braces in a few sites to comply with coding-style.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
-rw-r--r--	mm/slub.c	40

1 file changed, 20 insertions(+), 20 deletions(-)
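The conversion is mechanical: every locking precondition that previously
lived only in a comment becomes a lockdep_assert_held() call, which fires
a WARN at runtime if the calling context does not actually hold the lock
when CONFIG_LOCKDEP is enabled, and compiles away otherwise. Taking
remove_partial() from the patch below, the before/after shape in isolation:

	/* Before: the rule is visible only to human readers */
	/*
	 * list_lock must be held.
	 */
	static inline void remove_partial(struct kmem_cache_node *n,
						struct page *page)
	{
		list_del(&page->lru);
		n->nr_partial--;
	}

	/* After: lockdep checks the rule at every call site */
	static inline void remove_partial(struct kmem_cache_node *n,
						struct page *page)
	{
		lockdep_assert_held(&n->list_lock);

		list_del(&page->lru);
		n->nr_partial--;
	}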
diff --git a/mm/slub.c b/mm/slub.c
index 89490d9d91e0..367b224f2aa5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
@@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
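Note: this hunk is the one non-mechanical part of the patch. remove_full()
must now be passed the struct kmem_cache_node so the assertion has a lock
to name, and the !CONFIG_SLUB_DEBUG stub above changes in lockstep so both
configurations keep compiling. A sketch of the two-variant pattern, with
the debug body elided:

	#ifdef CONFIG_SLUB_DEBUG
	static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
				struct page *page)
	{
		lockdep_assert_held(&n->list_lock);
		/* ... unlink page from n->full ... */
	}
	#else
	/* Debugging off: same signature, empty body, no overhead. */
	static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
				       struct page *page) {}
	#endif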
@@ -1504,12 +1504,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1517,12 +1517,11 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
@@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
@@ -1887,7 +1886,7 @@ redo:
 
 		else if (l == M_FULL)
 
-			remove_full(s, page);
+			remove_full(s, n, page);
 
 		if (m == M_PARTIAL) {
 
@@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
 				/*
 				 * Slab was on no list before and will be
@@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 				n = get_node(s, page_to_nid(page));
 				/*
@@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -2614,9 +2613,10 @@ slab_empty:
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
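No call site gains new locking here: every path that reaches these helpers,
including the __slab_free() paths above, already held list_lock exactly as
the deleted comments demanded, so the assertions merely make the existing
rule enforceable. The expected calling pattern, with identifiers as in the
patch:

	spin_lock_irqsave(&n->list_lock, flags);
	remove_full(s, n, page);	/* lockdep_assert_held() is satisfied */
	spin_unlock_irqrestore(&n->list_lock, flags);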