author	Heiko Carstens <heiko.carstens@de.ibm.com>	2015-02-12 13:08:27 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-03-25 11:49:33 +0100
commit	5a79859ae0f35d25c67a03e82bf0c80592f16a39 (patch)
tree	37264d49f069812f19ced94e6ae171814fb7e498 /arch/s390/include/asm/pgtable.h
parent	1833c9f647e9bda1cd24653ff8f9c207b5f5b911 (diff)
s390: remove 31 bit support
Remove the 31 bit support in order to reduce maintenance cost and effectively remove dead code. For a couple of years now there has been no distribution left that comes with a 31 bit kernel.

The 31 bit kernel had also been broken for more than a year before anybody noticed. In addition, a removal warning shown at IPL for 5 minutes was added with commit a960062e5826 ("s390: add 31 bit warning message"), which let everybody know about the plan to remove the 31 bit code. We didn't get any response.

Given that the last 31 bit only machine was introduced in 1999, let's remove the code. Anybody with 31 bit user space code can still use the compat mode.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
-rw-r--r--	arch/s390/include/asm/pgtable.h	125
1 file changed, 5 insertions(+), 120 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e08ec38f8c6e..989cfae9e202 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -66,15 +66,9 @@ extern unsigned long zero_page_mask;
* table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
-#ifndef CONFIG_64BIT
-# define PMD_SHIFT 20
-# define PUD_SHIFT 20
-# define PGDIR_SHIFT 20
-#else /* CONFIG_64BIT */
-# define PMD_SHIFT 20
-# define PUD_SHIFT 31
-# define PGDIR_SHIFT 42
-#endif /* CONFIG_64BIT */
+#define PMD_SHIFT 20
+#define PUD_SHIFT 31
+#define PGDIR_SHIFT 42
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -90,15 +84,8 @@ extern unsigned long zero_page_mask;
* that leads to 1024 pte per pgd
*/
#define PTRS_PER_PTE 256
-#ifndef CONFIG_64BIT
-#define __PAGETABLE_PUD_FOLDED
-#define PTRS_PER_PMD 1
-#define __PAGETABLE_PMD_FOLDED
-#define PTRS_PER_PUD 1
-#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
#define PTRS_PER_PUD 2048
-#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0UL
@@ -127,23 +114,19 @@ extern struct page *vmemmap;
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
-#endif
static inline int is_module_addr(void *addr)
{
-#ifdef CONFIG_64BIT
BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
if (addr < (void *)MODULES_VADDR)
return 0;
if (addr > (void *)MODULES_END)
return 0;
-#endif
return 1;
}
@@ -284,56 +267,6 @@ static inline int is_module_addr(void *addr)
* pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
*/
-#ifndef CONFIG_64BIT
-
-/* Bits in the segment table address-space-control-element */
-#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
-#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
-#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
-#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
-#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
-
-/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
-#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
-#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
-#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
-#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
-
-#define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */
-#define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */
-#define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */
-#define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */
-#define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */
-#define _SEGMENT_ENTRY_BITS_LARGE 0
-#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
-
-#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
-
-/*
- * Segment table entry encoding (I = invalid, R = read-only bit):
- * ..R...I.....
- * prot-none ..1...1.....
- * read-only ..1...0.....
- * read-write ..0...0.....
- * empty ..0...1.....
- */
-
-/* Page status table bits for virtualization */
-#define PGSTE_ACC_BITS 0xf0000000UL
-#define PGSTE_FP_BIT 0x08000000UL
-#define PGSTE_PCL_BIT 0x00800000UL
-#define PGSTE_HR_BIT 0x00400000UL
-#define PGSTE_HC_BIT 0x00200000UL
-#define PGSTE_GR_BIT 0x00040000UL
-#define PGSTE_GC_BIT 0x00020000UL
-#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
-
-#else /* CONFIG_64BIT */
-
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
@@ -417,8 +350,6 @@ static inline int is_module_addr(void *addr)
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
-#endif /* CONFIG_64BIT */
-
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
@@ -509,19 +440,6 @@ static inline int mm_use_skey(struct mm_struct *mm)
/*
* pgd/pmd/pte query functions
*/
-#ifndef CONFIG_64BIT
-
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-
-static inline int pud_present(pud_t pud) { return 1; }
-static inline int pud_none(pud_t pud) { return 0; }
-static inline int pud_large(pud_t pud) { return 0; }
-static inline int pud_bad(pud_t pud) { return 0; }
-
-#else /* CONFIG_64BIT */
-
static inline int pgd_present(pgd_t pgd)
{
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
@@ -583,8 +501,6 @@ static inline int pud_bad(pud_t pud)
return (pud_val(pud) & mask) != 0;
}
-#endif /* CONFIG_64BIT */
-
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
@@ -916,18 +832,14 @@ static inline int pte_unused(pte_t pte)
static inline void pgd_clear(pgd_t *pgd)
{
-#ifdef CONFIG_64BIT
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
-#endif
}
static inline void pud_clear(pud_t *pud)
{
-#ifdef CONFIG_64BIT
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
-#endif
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -1026,10 +938,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + global TLB flush for the pte */
asm volatile(
" ipte %2,%3"
@@ -1040,10 +948,6 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + local TLB flush for the pte */
asm volatile(
" .insn rrf,0xb2210000,%2,%3,0,1"
@@ -1054,10 +958,6 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidate a range of ptes + global TLB flush of the ptes */
do {
asm volatile(
@@ -1376,17 +1276,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#ifndef CONFIG_64BIT
-
-#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
-#define pud_deref(pmd) ({ BUG(); 0UL; })
-#define pgd_deref(pmd) ({ BUG(); 0UL; })
-
-#define pud_offset(pgd, address) ((pud_t *) pgd)
-#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
-
-#else /* CONFIG_64BIT */
-
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
@@ -1407,8 +1296,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return pmd + pmd_index(address);
}
-#endif /* CONFIG_64BIT */
-
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -1729,11 +1616,9 @@ static inline int has_transparent_hugepage(void)
* 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
* 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
*/
-#ifndef CONFIG_64BIT
-#define __SWP_OFFSET_MASK (~0UL >> 12)
-#else
+
#define __SWP_OFFSET_MASK (~0UL >> 11)
-#endif
+
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;