Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 415
1 file changed, 217 insertions(+), 198 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4722582b17b8..3c67e92f7d59 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -11,18 +11,20 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
+#include <linux/memblock.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
-#include <asm/mach-types.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
+#include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/traps.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -61,7 +63,7 @@ struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
 	unsigned int	pmd;
-	unsigned int	pte;
+	pteval_t	pte;
 };
 
 static struct cachepolicy cache_policies[] __initdata = {
@@ -99,59 +101,66 @@ static struct cachepolicy cache_policies[] __initdata = {
  * writebuffer to be turned off.  (Note: the write
  * buffer should not be on and the cache off).
  */
-static void __init early_cachepolicy(char **p)
+static int __init early_cachepolicy(char *p)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 		int len = strlen(cache_policies[i].policy);
 
-		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 			cachepolicy = i;
 			cr_alignment &= ~cache_policies[i].cr_mask;
 			cr_no_alignment &= ~cache_policies[i].cr_mask;
-			*p += len;
 			break;
 		}
 	}
 	if (i == ARRAY_SIZE(cache_policies))
 		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+	/*
+	 * This restriction is partly to do with the way we boot; it is
+	 * unpredictable to have memory mapped using two different sets of
+	 * memory attributes (shared, type, and cache attribs).  We can not
+	 * change these attributes once the initial assembly has setup the
+	 * page tables.
+	 */
 	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
 		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
 		cachepolicy = CPOLICY_WRITEBACK;
 	}
 	flush_cache_all();
 	set_cr(cr_alignment);
+	return 0;
 }
-__early_param("cachepolicy=", early_cachepolicy);
+early_param("cachepolicy", early_cachepolicy);
 
-static void __init early_nocache(char **__unused)
+static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
 	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nocache", early_nocache);
+early_param("nocache", early_nocache);
 
-static void __init early_nowrite(char **__unused)
+static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
 	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nowb", early_nowrite);
+early_param("nowb", early_nowrite);
 
-static void __init early_ecc(char **p)
+static int __init early_ecc(char *p)
 {
-	if (memcmp(*p, "on", 2) == 0) {
+	if (memcmp(p, "on", 2) == 0)
 		ecc_mask = PMD_PROTECTION;
-		*p += 2;
-	} else if (memcmp(*p, "off", 3) == 0) {
+	else if (memcmp(p, "off", 3) == 0)
 		ecc_mask = 0;
-		*p += 3;
-	}
+	return 0;
 }
-__early_param("ecc=", early_ecc);
+early_param("ecc", early_ecc);
 
 static int __init noalign_setup(char *__unused)
 {
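Note: the old ARM-private __early_param() handlers took a char ** cursor they
had to advance past the value; the generic early_param() convention adopted
above hands the handler the option's value string directly and expects an int
return. A minimal sketch of the new convention, using a made-up "myopt"
parameter and flag:

    static bool myopt_enabled;	/* hypothetical flag, for illustration only */

    static int __init early_myopt(char *p)
    {
    	/* p points at the text following "myopt=" on the command line */
    	if (p && strcmp(p, "on") == 0)
    		myopt_enabled = true;
    	return 0;	/* non-zero would flag the option as malformed */
    }
    early_param("myopt", early_myopt);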
@@ -182,7 +191,7 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
@@ -227,17 +236,19 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_LOW_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_EXEC,
+				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC,
+				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -246,9 +257,24 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_NONCACHED] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_DTCM] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_XN,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+		.domain    = DOMAIN_KERNEL,
+	},
+	[MT_MEMORY_ITCM] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.domain    = DOMAIN_KERNEL,
+	},
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -282,9 +308,8 @@ static void __init build_mem_type_table(void)
 			cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-#ifdef CONFIG_SMP
-	cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+	if (is_smp())
+		cachepolicy = CPOLICY_WRITEALLOC;
 
 	/*
 	 * Strip out features not present on earlier architectures.
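Note: the new MT_MEMORY_DTCM/MT_MEMORY_ITCM entries give the TCM support code
mapping types of its own -- DTCM is mapped non-executable (L_PTE_XN /
PMD_SECT_XN) while ITCM remains executable, and both carry prot_pte/prot_l1 so
they can be mapped with small pages. A hedged sketch of how such a type is
consumed (the addresses are made up; the real users live in the TCM code):

    static struct map_desc dtcm_map __initdata = {
    	.pfn		= __phys_to_pfn(0x20000000),	/* assumed DTCM base */
    	.virtual	= 0xfffe8000,			/* assumed fixed VA */
    	.length		= SZ_4K,
    	.type		= MT_MEMORY_DTCM,		/* data: mapped XN */
    };

    static void __init mymach_map_tcm(void)
    {
    	iotable_init(&dtcm_map, 1);
    }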
@@ -378,21 +403,22 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-#ifndef CONFIG_SMP
 	/*
 	 * Only use write-through for non-SMP systems
 	 */
-	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent() && cpu_is_xsc3())
+	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
@@ -405,16 +431,23 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-#ifdef CONFIG_SMP
-		/*
-		 * Mark memory with the "shared" attribute for SMP systems
-		 */
-		user_pgprot |= L_PTE_SHARED;
-		kern_pgprot |= L_PTE_SHARED;
-		vecs_pgprot |= L_PTE_SHARED;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-#endif
+		if (is_smp()) {
+			/*
+			 * Mark memory with the "shared" attribute
+			 * for SMP systems
+			 */
+			user_pgprot |= L_PTE_SHARED;
+			kern_pgprot |= L_PTE_SHARED;
+			vecs_pgprot |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+		}
 	}
 
 	/*
@@ -445,12 +478,13 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE |
-				 L_PTE_EXEC | kern_pgprot);
+				 L_PTE_DIRTY | kern_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -474,20 +508,43 @@ static void __init build_mem_type_table(void)
 	}
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn))
+		return pgprot_noncached(vma_prot);
+	else if (file->f_flags & O_SYNC)
+		return pgprot_writecombine(vma_prot);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
-				  unsigned long end, unsigned long pfn,
-				  const struct mem_type *type)
+static void __init *early_alloc(unsigned long sz)
 {
-	pte_t *pte;
+	void *ptr = __va(memblock_alloc(sz, sz));
+	memset(ptr, 0, sz);
+	return ptr;
+}
 
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+{
 	if (pmd_none(*pmd)) {
-		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
-		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
+		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
+		__pmd_populate(pmd, __pa(pte), prot);
 	}
+	BUG_ON(pmd_bad(*pmd));
+	return pte_offset_kernel(pmd, addr);
+}
 
-	pte = pte_offset_kernel(pmd, addr);
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
+{
+	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
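Note: phys_mem_access_prot() added above is the hook consulted by the /dev/mem
mmap path, so with CONFIG_ARM_DMA_MEM_BUFFERABLE user mappings follow attribute
rules consistent with the kernel's own: invalid pfns (device space) come back
uncached, O_SYNC mappings of real RAM come back write-combining, and everything
else stays cacheable. A hedged userspace sketch (the physical address is made
up, and error handling is elided):

    #include <fcntl.h>
    #include <sys/mman.h>

    int main(void)
    {
    	int fd = open("/dev/mem", O_RDWR | O_SYNC);
    	/* 0x40000000 is an assumed RAM physical address */
    	volatile char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
    				MAP_SHARED, fd, 0x40000000);
    	p[0] = 1;	/* store goes through a write-combining mapping */
    	return 0;
    }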
@@ -495,7 +552,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 }
 
 static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
-				      unsigned long end, unsigned long phys,
+				      unsigned long end, phys_addr_t phys,
 				      const struct mem_type *type)
 {
 	pmd_t *pmd = pmd_offset(pgd, addr);
@@ -530,7 +587,8 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
 static void __init create_36bit_mapping(struct map_desc *md,
 					const struct mem_type *type)
 {
-	unsigned long phys, addr, length, end;
+	unsigned long addr, length, end;
+	phys_addr_t phys;
 	pgd_t *pgd;
 
 	addr = md->virtual;
@@ -592,7 +650,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md)
 {
 	unsigned long phys, addr, length, end;
 	const struct mem_type *type;
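Note: the hunks above widen physical addresses from unsigned long to
phys_addr_t and make create_mapping() static now that its users are local to
this file. The type change is what keeps >32-bit physical addresses intact
when CONFIG_PHYS_ADDR_T_64BIT is set -- the 36-bit supersection path in
create_36bit_mapping() is exactly such a case. A tiny sketch of the failure
mode being avoided (the address is hypothetical):

    static void __init example(void)
    {
    	phys_addr_t phys = 0x140000000ULL;	/* made-up bank above 4GB */
    	unsigned long bad = (unsigned long)phys;/* truncates to 0x40000000 */
    	(void)bad;
    }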
@@ -656,16 +714,16 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
-static unsigned long __initdata vmalloc_reserve = SZ_128M;
+static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the vmalloc
  * area - the default is 128m.
  */
-static void __init early_vmalloc(char **arg)
+static int __init early_vmalloc(char *arg)
 {
-	vmalloc_reserve = memparse(*arg, arg);
+	unsigned long vmalloc_reserve = memparse(arg, NULL);
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
@@ -680,50 +738,61 @@ static void __init early_vmalloc(char *arg)
 			"vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+
+	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
+	return 0;
 }
-__early_param("vmalloc=", early_vmalloc);
+early_param("vmalloc", early_vmalloc);
 
-#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
+static phys_addr_t lowmem_limit __initdata = 0;
 
 static void __init sanity_check_meminfo(void)
 {
-	int i, j;
+	int i, j, highmem = 0;
+
+	lowmem_limit = __pa(vmalloc_min - 1) + 1;
+	memblock_set_current_limit(lowmem_limit);
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
+		if (__va(bank->start) > vmalloc_min ||
+		    __va(bank->start) < (void *)PAGE_OFFSET)
+			highmem = 1;
+
+		bank->highmem = highmem;
+
 		/*
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
 		 */
-		if (__va(bank->start) < VMALLOC_MIN &&
-		    bank->size > VMALLOC_MIN - __va(bank->start)) {
+		if (__va(bank->start) < vmalloc_min &&
+		    bank->size > vmalloc_min - __va(bank->start)) {
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
 						 "ignoring high memory\n");
-			} else if (cache_is_vipt_aliasing()) {
-				printk(KERN_CRIT "HIGHMEM is not yet supported "
-					"with VIPT aliasing cache, "
-					"ignoring high memory\n");
 			} else {
 				memmove(bank + 1, bank,
 					(meminfo.nr_banks - i) * sizeof(*bank));
 				meminfo.nr_banks++;
 				i++;
-				bank[1].size -= VMALLOC_MIN - __va(bank->start);
-				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+				bank[1].size -= vmalloc_min - __va(bank->start);
+				bank[1].start = __pa(vmalloc_min - 1) + 1;
+				bank[1].highmem = highmem = 1;
 				j++;
 			}
-			bank->size = VMALLOC_MIN - __va(bank->start);
+			bank->size = vmalloc_min - __va(bank->start);
 		}
 #else
+		bank->highmem = highmem;
+
 		/*
 		 * Check whether this memory bank would entirely overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start) >= VMALLOC_MIN ||
+		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET) {
 			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
 			       "(vmalloc region overlap).\n",
@@ -735,9 +804,9 @@ static void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would partially overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+		if (__va(bank->start + bank->size) > vmalloc_min ||
 		    __va(bank->start + bank->size) < __va(bank->start)) {
-			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+			unsigned long newsize = vmalloc_min - __va(bank->start);
 			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
 			       "to -%.8lx (vmalloc region overlap).\n",
 			       bank->start, bank->start + bank->size - 1,
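Note: with vmalloc_min now a pointer fixed at build time (adjusted by the
vmalloc= parameter) and lowmem_limit derived from it -- and fed to
memblock_set_current_limit() so early allocations land in mapped memory --
the bank-splitting arithmetic above can be checked by hand. A worked example,
all values assumed (PAGE_OFFSET 0xc0000000, VMALLOC_END 0xff000000, and the
default 128MB vmalloc reserve):

    vmalloc_min  = (void *)(0xff000000 - SZ_128M) = (void *)0xf7000000
    lowmem_limit = __pa(0xf7000000 - 1) + 1
                 = PHYS_OFFSET + 0x37000000, i.e. 880MB of lowmem

A single 1GB bank at PHYS_OFFSET is therefore split: bank[0] keeps the first
880MB as lowmem, and the inserted bank[1] holds the remaining 144MB with
bank->highmem = 1.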
@@ -747,12 +816,43 @@ static void __init sanity_check_meminfo(void)
 #endif
 		j++;
 	}
+#ifdef CONFIG_HIGHMEM
+	if (highmem) {
+		const char *reason = NULL;
+
+		if (cache_is_vipt_aliasing()) {
+			/*
+			 * Interactions between kmap and other mappings
+			 * make highmem support with aliasing VIPT caches
+			 * rather difficult.
+			 */
+			reason = "with VIPT aliasing cache";
+		} else if (is_smp() && tlb_ops_need_broadcast()) {
+			/*
+			 * kmap_high needs to occasionally flush TLB entries,
+			 * however, if the TLB entries need to be broadcast
+			 * we may deadlock:
+			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
+			 *  flush_tlb_kernel_range->smp_call_function_many
+			 *  (must not be called with irqs off)
+			 */
+			reason = "without hardware TLB ops broadcasting";
+		}
+		if (reason) {
+			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
+				reason);
+			while (j > 0 && meminfo.bank[j - 1].highmem)
+				j--;
+		}
+	}
+#endif
 	meminfo.nr_banks = j;
 }
 
 static inline void prepare_page_table(void)
 {
 	unsigned long addr;
+	phys_addr_t end;
 
 	/*
 	 * Clear out all the mappings below the kernel image.
@@ -768,109 +868,39 @@ static inline void prepare_page_table(void)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
+	 * Find the end of the first block of lowmem.
+	 */
+	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+	if (end >= lowmem_limit)
+		end = lowmem_limit;
+
+	/*
 	 * Clear out all the kernel space mappings, except for the first
 	 * memory bank, up to the end of the vmalloc region.
 	 */
-	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+	for (addr = __phys_to_virt(end);
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
 
 /*
- * Reserve the various regions of node 0
+ * Reserve the special regions of memory
 */
-void __init reserve_node_zero(pg_data_t *pgdat)
+void __init arm_mm_memblock_reserve(void)
 {
-	unsigned long res_size = 0;
-
-	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
-			BOOTMEM_DEFAULT);
-#else
-	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
-			BOOTMEM_DEFAULT);
-#endif
-
 	/*
 	 * Reserve the page tables.  These are already in use,
 	 * and can only be in node 0.
 	 */
-	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-			     PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
-
-	/*
-	 * Hmm... This should go elsewhere, but we really really need to
-	 * stop things allocating the low memory; ideally we need a better
-	 * implementation of GFP_DMA which does not assume that DMA-able
-	 * memory starts at zero.
-	 */
-	if (machine_is_integrator() || machine_is_cintegrator())
-		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
-
-	/*
-	 * These should likewise go elsewhere.  They pre-reserve the
-	 * screen memory region at the start of main system memory.
-	 */
-	if (machine_is_edb7211())
-		res_size = 0x00020000;
-	if (machine_is_p720t())
-		res_size = 0x00014000;
-
-	/* H1940 and RX3715 need to reserve this for suspend */
-
-	if (machine_is_h1940() || machine_is_rx3715()) {
-		reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
-				BOOTMEM_DEFAULT);
-		reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
-				BOOTMEM_DEFAULT);
-	}
-
-	if (machine_is_palmld() || machine_is_palmtx()) {
-		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-	}
-
-	if (machine_is_treo680()) {
-		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-	}
-
-	if (machine_is_palmt5())
-		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-
-	/*
-	 * U300 - This platform family can share physical memory
-	 * between two ARM cpus, one running Linux and the other
-	 * running another OS.
-	 */
-	if (machine_is_u300()) {
-#ifdef CONFIG_MACH_U300_SINGLE_RAM
-#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
-	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
-		res_size = 0x00100000;
-#endif
-#endif
-	}
+	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
 
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
 	 * precious DMA-able memory...
 	 */
-	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
 #endif
-
-	if (res_size)
-		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
-				BOOTMEM_DEFAULT);
 }
 
 /*
@@ -884,12 +914,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	vectors_page = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -929,7 +958,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
@@ -960,14 +989,35 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
-	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
-	BUG_ON(!pmd_none(*pmd) || !pte);
-	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
-	pkmap_page_table = pte + PTRS_PER_PTE;
+	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+		PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
 
+static void __init map_lowmem(void)
+{
+	struct memblock_region *reg;
+
+	/* Map all the lowmem memory banks. */
+	for_each_memblock(memory, reg) {
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
+		struct map_desc map;
+
+		if (end > lowmem_limit)
+			end = lowmem_limit;
+		if (start >= end)
+			break;
+
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
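Note: reserve_node_zero() and its accumulated machine_is_*() special cases are
gone -- the core now reserves only what the MM layer itself owns (the page
tables, plus the SA1111 workaround), and board-specific reservations are
expected to move out to platform code. map_lowmem() above then supplies the
other half of the conversion, building the lowmem mapping by walking memblock
(for_each_memblock(memory, reg)) instead of the meminfo bank array. A hedged
sketch of the replacement pattern on the board side (the .reserve
machine-descriptor hook comes from the same memblock conversion series; the
address and board name are made up):

    /* Hypothetical board code: reserve a scratch page used across suspend,
     * instead of patching reserve_node_zero(). */
    static void __init myboard_reserve(void)
    {
    	memblock_reserve(0xa0000000, SZ_4K);	/* assumed physical address */
    }

    MACHINE_START(MYBOARD, "My board")
    	/* ... other fields ... */
    	.reserve	= myboard_reserve,
    MACHINE_END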
@@ -979,48 +1029,17 @@ void __init paging_init(struct machine_desc *mdesc)
 {
 	build_mem_type_table();
 	sanity_check_meminfo();
 	prepare_page_table();
-	bootmem_init();
+	map_lowmem();
 	devicemaps_init(mdesc);
 	kmap_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 
-	/*
-	 * allocate the zero page.  Note that this always succeeds and
-	 * returns a zeroed result.
-	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
-}
-
-/*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
- */
-void setup_mm_for_reboot(char mode)
-{
-	unsigned long base_pmdval;
-	pgd_t *pgd;
-	int i;
-
-	if (current->mm && current->mm->pgd)
-		pgd = current->mm->pgd;
-	else
-		pgd = init_mm.pgd;
-
-	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-		base_pmdval |= PMD_BIT4;
+	/* allocate the zero page. */
+	zero_page = early_alloc(PAGE_SIZE);
 
-	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
-		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
-		pmd_t *pmd;
+	bootmem_init();
 
-		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
-		pmd[0] = __pmd(pmdval);
-		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
-		flush_pmd_entry(pmd);
-	}
+	empty_zero_page = virt_to_page(zero_page);
+	__flush_dcache_page(NULL, empty_zero_page);
 }
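Note: one ordering detail in the final hunk -- map_lowmem() now runs before
bootmem_init() and before the zero page is allocated, which is what makes
early_alloc() safe at these call sites. My reading of the dependency chain
(an inference from the code above, not a claim from the commit message):

    /*
     * early_alloc(PAGE_SIZE)
     *   -> memblock_alloc(sz, sz)  returns a physical address below
     *                              lowmem_limit, because
     *                              memblock_set_current_limit() was called
     *                              in sanity_check_meminfo()
     *   -> __va(phys)              only valid because map_lowmem() has
     *                              already created the lowmem mapping
     *   -> memset(ptr, 0, sz)      so the zero page needs no extra zeroing,
     *                              only the dcache flush that follows
     */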