author     David S. Miller <davem@davemloft.net>  2012-05-12 12:26:47 -0700
committer  David S. Miller <davem@davemloft.net>  2012-05-12 12:26:47 -0700
commit     a46d6056f6585b1afefde190ae78ea894d720693 (patch)
tree       592accd762468a11a675ea16cce719fafe9bbf2f /arch/sparc/include/asm/pgtable_32.h
parent     3d386c0ef60cf4810f6d5c62b637a8fb55ec9b2e (diff)
sparc32: Un-btfixup {pte,pmd,pgd}_clear().
Also we can remove BTFIXUPCALL_SWAPO0G0 as that is no longer used.

This was rather amusing, we were setting the btfixup vectors based upon
cpu type but all to the same exact generic srmmu routines.

Furthermore, we were inconsistently marking the fixup as either
BTFIXUPCALL_SWAPO0G0 or BTFIXUPCALL_NORM.

What a mess, glad we could untangle this stuff.

Signed-off-by: David S. Miller <davem@davemloft.net>
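For readers unfamiliar with BTFIXUP, the sketch below condenses the before/after
shape of this conversion for pte_clear(), taken from the lines of the diff that
follows: the BTFIXUP declaration is the boot-time patched call vector being
removed, and the replacement is an ordinary static inline. This is only an
illustration of the pattern, not additional code from the patch.

/* Before: pte_clear() dispatched through a boot-time patched btfixup vector. */
BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
#define pte_clear(mm, addr, pte)	BTFIXUP_CALL(pte_clear)(pte)

/* After: a plain inline that stores zero through the atomic swap helper,
 * since every CPU type was being patched to the same generic SRMMU routine
 * anyway. */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);	/* __pte_clear() does srmmu_set_pte(ptep, __pte(0)) */
}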
Diffstat (limited to 'arch/sparc/include/asm/pgtable_32.h')
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h  |  44
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 227118577a94..144ab311c52a 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -119,6 +119,22 @@ extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
/*
+ * In general all page table modifications should use the V8 atomic
+ * swap instruction. This insures the mmu and the cpu are in sync
+ * with respect to ref/mod bits in the page tables.
+ */
+static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
+{
+ __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
+ return value;
+}
+
+static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
+{
+ srmmu_swap((unsigned long *)ptep, pte_val(pteval));
+}
+
+/*
*/
BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
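
As the new comment in the hunk above notes, the SRMMU hardware updates the
referenced/modified bits of page table entries on its own, so the kernel's
stores must be atomic exchanges (the V8 swap instruction) rather than plain
stores, or a concurrent hardware update could be lost. As a rough user-space
analogy of the same idea (not kernel code; names and values are made up for
illustration), the GCC/Clang __atomic_exchange_n builtin plays the role of the
swap instruction:

#include <stdio.h>

/* Toy stand-in for a PTE word that "hardware" may also be updating. */
static unsigned long pte_word;

/* Atomically install a new value and return the old one, analogous to
 * srmmu_swap()'s use of the V8 "swap [addr], reg" instruction.  A plain
 * read-then-write sequence here could lose a concurrent update of the
 * ref/mod bits. */
static unsigned long swap_word(unsigned long *addr, unsigned long value)
{
	return __atomic_exchange_n(addr, value, __ATOMIC_SEQ_CST);
}

int main(void)
{
	pte_word = 0x1234;				/* pretend this PTE is live */
	unsigned long old = swap_word(&pte_word, 0);	/* like __pte_clear() */
	printf("old=%#lx new=%#lx\n", old, pte_word);
	return 0;
}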
@@ -127,7 +143,6 @@ BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
-BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
static inline int pte_none(pte_t pte)
{
@@ -135,11 +150,19 @@ static inline int pte_none(pte_t pte)
}
#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
-#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
+
+static inline void __pte_clear(pte_t *ptep)
+{
+ srmmu_set_pte(ptep, __pte(0));
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ __pte_clear(ptep);
+}
BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
-BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
static inline int pmd_none(pmd_t pmd)
{
@@ -148,17 +171,26 @@ static inline int pmd_none(pmd_t pmd)
#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
-#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ int i;
+ for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
+ srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
+}
BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
-BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
-#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+ srmmu_set_pte((pte_t *)pgdp, __pte(0));
+}
/*
* The following only work if pte_present() is true.