author    Paul Mundt <lethal@linux-sh.org>  2007-05-11 11:26:10 +0900
committer Paul Mundt <lethal@linux-sh.org>  2007-05-14 09:18:34 +0900
commit    38c425f69c8d949620384f917e00652eaf390ec9 (patch)
tree      0e4cbaf7e1be44c92db0587d77fc8e4f70227db1 /arch/sh
parent    0facbe3a34556bbc30333971e32c5430b087fcb1 (diff)
sh: Kill off pmb slab cache destructor.
This is the last remaining slab destructor in the kernel, which we kill off
and move the resultant list tracking logic up to the pmb_alloc()/pmb_free()
paths. As Christoph Lameter pointed out, it's potentially unsafe to be taking
the list lock in the destructor anyway, so this is also more fundamentally
correct.

With this in place, we're all set for killing off slab destructors from the
kernel entirely.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
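For reference, a minimal user-space sketch of the pointer-to-pointer list walk
that this patch moves into the pmb_alloc()/pmb_free() paths. The struct layout
is pared down (only ->next matches the kernel code; the vpn field is kept just
to make the demo printable) and the pmb_list_lock locking is omitted, so this
only illustrates the add/del logic itself:

    #include <stdio.h>

    /* Simplified stand-in for the kernel's struct pmb_entry. */
    struct pmb_entry {
            unsigned long vpn;
            struct pmb_entry *next;
    };

    static struct pmb_entry *pmb_list;

    /* Append at the tail: walk the next pointers until *p is NULL. */
    static void pmb_list_add(struct pmb_entry *pmbe)
    {
            struct pmb_entry **p, *tmp;

            p = &pmb_list;
            while ((tmp = *p) != NULL)
                    p = &tmp->next;

            pmbe->next = tmp;       /* tmp is NULL here */
            *p = pmbe;
    }

    /* Unlink by redirecting the predecessor's next past the victim. */
    static void pmb_list_del(struct pmb_entry *pmbe)
    {
            struct pmb_entry **p, *tmp;

            for (p = &pmb_list; (tmp = *p); p = &tmp->next)
                    if (tmp == pmbe) {
                            *p = tmp->next;
                            return;
                    }
    }

    int main(void)
    {
            struct pmb_entry a = { .vpn = 1 }, b = { .vpn = 2 }, c = { .vpn = 3 };

            pmb_list_add(&a);
            pmb_list_add(&b);
            pmb_list_add(&c);
            pmb_list_del(&b);

            for (struct pmb_entry *e = pmb_list; e; e = e->next)
                    printf("vpn=%lu\n", e->vpn);    /* prints 1, then 3 */
            return 0;
    }

The double-pointer walk is what lets pmb_list_del() unlink the head entry
without a special case: on the first iteration, *p is the pmb_list head
pointer itself.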
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/mm/pmb.c  |  79
1 file changed, 38 insertions(+), 41 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 02aae06527dc..b6a5a338145b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
*
* Privileged Space Mapping Buffer (PMB) Support.
*
- * Copyright (C) 2005, 2006 Paul Mundt
+ * Copyright (C) 2005, 2006, 2007 Paul Mundt
*
* P1/P2 Section mapping definitions from map32.h, which was:
*
@@ -68,6 +68,32 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
return mk_pmb_entry(entry) | PMB_DATA;
}
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+ struct pmb_entry **p, *tmp;
+
+ p = &pmb_list;
+ while ((tmp = *p) != NULL)
+ p = &tmp->next;
+
+ pmbe->next = tmp;
+ *p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+ struct pmb_entry **p, *tmp;
+
+ for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+ if (tmp == pmbe) {
+ *p = tmp->next;
+ return;
+ }
+}
+
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
unsigned long flags)
{
@@ -81,11 +107,19 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
pmbe->ppn = ppn;
pmbe->flags = flags;
+ spin_lock_irq(&pmb_list_lock);
+ pmb_list_add(pmbe);
+ spin_unlock_irq(&pmb_list_lock);
+
return pmbe;
}
void pmb_free(struct pmb_entry *pmbe)
{
+ spin_lock_irq(&pmb_list_lock);
+ pmb_list_del(pmbe);
+ spin_unlock_irq(&pmb_list_lock);
+
kmem_cache_free(pmb_cache, pmbe);
}
@@ -167,31 +201,6 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
clear_bit(entry, &pmb_map);
}
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
- struct pmb_entry **p, *tmp;
-
- p = &pmb_list;
- while ((tmp = *p) != NULL)
- p = &tmp->next;
-
- pmbe->next = tmp;
- *p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
- struct pmb_entry **p, *tmp;
-
- for (p = &pmb_list; (tmp = *p); p = &tmp->next)
- if (tmp == pmbe) {
- *p = tmp->next;
- return;
- }
-}
static struct {
unsigned long size;
@@ -283,25 +292,14 @@ void pmb_unmap(unsigned long addr)
} while (pmbe);
}
-static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
+ unsigned long flags)
{
struct pmb_entry *pmbe = pmb;
memset(pmb, 0, sizeof(struct pmb_entry));
- spin_lock_irq(&pmb_list_lock);
-
pmbe->entry = PMB_NO_ENTRY;
- pmb_list_add(pmbe);
-
- spin_unlock_irq(&pmb_list_lock);
-}
-
-static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
-{
- spin_lock_irq(&pmb_list_lock);
- pmb_list_del(pmb);
- spin_unlock_irq(&pmb_list_lock);
}
static int __init pmb_init(void)
@@ -312,8 +310,7 @@ static int __init pmb_init(void)
BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
- SLAB_PANIC, pmb_cache_ctor,
- pmb_cache_dtor);
+ SLAB_PANIC, pmb_cache_ctor, NULL);
jump_to_P2();
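
As context for the pmb_init() hunk above: a sketch of the slab API this patch
targets, assuming the 2.6.21/22-era kmem_cache_create() prototype (which
matches the ctor signature visible in the diff), where the destructor is the
final argument that the patch now passes as NULL:

    struct kmem_cache *
    kmem_cache_create(const char *name, size_t size, size_t align,
                      unsigned long flags,
                      void (*ctor)(void *, struct kmem_cache *, unsigned long),
                      void (*dtor)(void *, struct kmem_cache *, unsigned long));

    /* Destructor-free usage, as in this patch: list bookkeeping is done
     * explicitly in pmb_alloc()/pmb_free() rather than relying on slab
     * to call a dtor, which (per the commit message) could take
     * pmb_list_lock from an unsafe context. */
    pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
                                  SLAB_PANIC, pmb_cache_ctor, NULL);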