| field | value | date |
|---|---|---|
| author | Roman Zippel <zippel@linux-m68k.org> | 2007-05-31 00:40:54 -0700 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-31 07:58:14 -0700 |
| commit | 12d810c1b8c2b913d48e629e2b5c01d105029839 | |
| tree | b39162d3168f6173af3d0e5790e16eb45a70dfaf /arch/m68k/mm | |
| parent | 00c541eae7a477e3d1adb1ebf27cccc0bdb5f824 | |
m68k: discontinuous memory support
Fix support for discontinuous memory
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
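
The core of the change is a coarse lookup that replaces the linear walk over m68k_memory[] previously done in mm_vtop()/mm_ptov(): each memory chunk becomes a node, and a small table maps the upper bits of a kernel virtual address straight to that chunk's pg_data_t. The sketch below is a standalone illustration of that idea only, with toy names and types; the real table, shift and setup routine are pg_data_table, m68k_virt_to_node_shift and m68k_setup_node() in the init.c hunk further down, and the actual virt-to-node helper lives in the m68k headers, which are outside this diffstat.

```c
#include <stdio.h>

/* Toy model of the node lookup this patch introduces -- not kernel code. */
struct toy_pgdat { int node; };

#define TOY_SLOTS 65                 /* mirrors the 65-entry pg_data_table */
static struct toy_pgdat *toy_table[TOY_SLOTS];
static struct toy_pgdat toy_nodes[2] = { { 0 }, { 1 } };
static int toy_shift = 21;           /* stands in for m68k_virt_to_node_shift */

/* Point every slot a chunk touches at its node, as m68k_setup_node() does. */
static void toy_setup_node(int node, unsigned long vstart, unsigned long vsize)
{
        unsigned long i = vstart >> toy_shift;
        unsigned long end = (vstart + vsize - 1) >> toy_shift;

        for (; i <= end; i++)
                toy_table[i] = &toy_nodes[node];
}

/* O(1) replacement for the old linear chunk walk; slots in the hole
 * between chunks simply stay NULL. */
static struct toy_pgdat *toy_virt_to_node(unsigned long vaddr)
{
        return toy_table[vaddr >> toy_shift];
}

int main(void)
{
        toy_setup_node(0, 0x00000000, 16UL << 20);   /* chunk 0: 16 MB at 0 */
        toy_setup_node(1, 0x06000000, 32UL << 20);   /* chunk 1: 32 MB at 96 MB */
        printf("0x00400000 -> node %d\n", toy_virt_to_node(0x00400000)->node);
        printf("0x07000000 -> node %d\n", toy_virt_to_node(0x07000000)->node);
        return 0;
}
```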
Diffstat (limited to 'arch/m68k/mm')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/m68k/mm/init.c | 119 |
| -rw-r--r-- | arch/m68k/mm/memory.c | 73 |
| -rw-r--r-- | arch/m68k/mm/motorola.c | 101 |

3 files changed, 147 insertions, 146 deletions
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index ab90213e5c54..f1de19e1dde6 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -7,6 +7,7 @@
  * to motorola.c and sun3mmu.c
  */

+#include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
@@ -31,6 +32,37 @@

 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

+static bootmem_data_t __initdata bootmem_data[MAX_NUMNODES];
+
+pg_data_t pg_data_map[MAX_NUMNODES];
+EXPORT_SYMBOL(pg_data_map);
+
+int m68k_virt_to_node_shift;
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+pg_data_t *pg_data_table[65];
+EXPORT_SYMBOL(pg_data_table);
+#endif
+
+void m68k_setup_node(int node)
+{
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+        struct mem_info *info = m68k_memory + node;
+        int i, end;
+
+        i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
+        end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
+        for (; i <= end; i++) {
+                if (pg_data_table[i])
+                        printk("overlap at %u for chunk %u\n", i, node);
+                pg_data_table[i] = pg_data_map + node;
+        }
+#endif
+        pg_data_map[node].bdata = bootmem_data + node;
+        node_set_online(node);
+}
+
+
 /*
  * ZERO_PAGE is a special page that is used for zero-initialized
  * data and COW.
@@ -40,52 +72,51 @@

 void *empty_zero_page;

 void show_mem(void)
 {
-        unsigned long i;
-        int free = 0, total = 0, reserved = 0, shared = 0;
-        int cached = 0;
-
-        printk("\nMem-info:\n");
-        show_free_areas();
-        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-        i = max_mapnr;
-        while (i-- > 0) {
-                total++;
-                if (PageReserved(mem_map+i))
-                        reserved++;
-                else if (PageSwapCache(mem_map+i))
-                        cached++;
-                else if (!page_count(mem_map+i))
-                        free++;
-                else
-                        shared += page_count(mem_map+i) - 1;
-        }
-        printk("%d pages of RAM\n",total);
-        printk("%d free pages\n",free);
-        printk("%d reserved pages\n",reserved);
-        printk("%d pages shared\n",shared);
-        printk("%d pages swap cached\n",cached);
+        pg_data_t *pgdat;
+        int free = 0, total = 0, reserved = 0, shared = 0;
+        int cached = 0;
+        int i;
+
+        printk("\nMem-info:\n");
+        show_free_areas();
+        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+        for_each_online_pgdat(pgdat) {
+                for (i = 0; i < pgdat->node_spanned_pages; i++) {
+                        struct page *page = pgdat->node_mem_map + i;
+                        total++;
+                        if (PageReserved(page))
+                                reserved++;
+                        else if (PageSwapCache(page))
+                                cached++;
+                        else if (!page_count(page))
+                                free++;
+                        else
+                                shared += page_count(page) - 1;
+                }
+        }
+        printk("%d pages of RAM\n",total);
+        printk("%d free pages\n",free);
+        printk("%d reserved pages\n",reserved);
+        printk("%d pages shared\n",shared);
+        printk("%d pages swap cached\n",cached);
 }

 extern void init_pointer_table(unsigned long ptable);

 /* References to section boundaries */

-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
+extern char _text[], _etext[];
+extern char __init_begin[], __init_end[];

 extern pmd_t *zero_pgtable;

 void __init mem_init(void)
 {
+        pg_data_t *pgdat;
         int codepages = 0;
         int datapages = 0;
         int initpages = 0;
-        unsigned long tmp;
-#ifndef CONFIG_SUN3
         int i;
-#endif
-
-        max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);

 #ifdef CONFIG_ATARI
         if (MACH_IS_ATARI)
@@ -93,19 +124,25 @@
 #endif

         /* this will put all memory onto the freelists */
-        totalram_pages = free_all_bootmem();
-
-        for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
-                if (PageReserved(virt_to_page(tmp))) {
-                        if (tmp >= (unsigned long)&_text
-                            && tmp < (unsigned long)&_etext)
+        totalram_pages = num_physpages = 0;
+        for_each_online_pgdat(pgdat) {
+                num_physpages += pgdat->node_present_pages;
+
+                totalram_pages += free_all_bootmem_node(pgdat);
+                for (i = 0; i < pgdat->node_spanned_pages; i++) {
+                        struct page *page = pgdat->node_mem_map + i;
+                        char *addr = page_to_virt(page);
+
+                        if (!PageReserved(page))
+                                continue;
+                        if (addr >= _text &&
+                            addr < _etext)
                                 codepages++;
-                        else if (tmp >= (unsigned long) &__init_begin
-                                 && tmp < (unsigned long) &__init_end)
+                        else if (addr >= __init_begin &&
+                                 addr < __init_end)
                                 initpages++;
                         else
                                 datapages++;
-                        continue;
                 }
         }

@@ -124,7 +161,7 @@

         printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
-               max_mapnr << (PAGE_SHIFT-10),
+               totalram_pages << (PAGE_SHIFT-10),
                codepages << (PAGE_SHIFT-10),
                datapages << (PAGE_SHIFT-10),
                initpages << (PAGE_SHIFT-10));
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 13c0b4ad01eb..b7473525b431 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -127,67 +127,6 @@
         return 0;
 }

-#ifdef DEBUG_INVALID_PTOV
-int mm_inv_cnt = 5;
-#endif
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-/*
- * The following two routines map from a physical address to a kernel
- * virtual address and vice versa.
- */
-unsigned long mm_vtop(unsigned long vaddr)
-{
-        int i=0;
-        unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
-
-        do {
-                if (voff < m68k_memory[i].size) {
-#ifdef DEBUGPV
-                        printk ("VTOP(%p)=%lx\n", vaddr,
-                                m68k_memory[i].addr + voff);
-#endif
-                        return m68k_memory[i].addr + voff;
-                }
-                voff -= m68k_memory[i].size;
-        } while (++i < m68k_num_memory);
-
-        /* As a special case allow `__pa(high_memory)'. */
-        if (voff == 0)
-                return m68k_memory[i-1].addr + m68k_memory[i-1].size;
-
-        return -1;
-}
-EXPORT_SYMBOL(mm_vtop);
-
-unsigned long mm_ptov (unsigned long paddr)
-{
-        int i = 0;
-        unsigned long poff, voff = PAGE_OFFSET;
-
-        do {
-                poff = paddr - m68k_memory[i].addr;
-                if (poff < m68k_memory[i].size) {
-#ifdef DEBUGPV
-                        printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
-#endif
-                        return poff + voff;
-                }
-                voff += m68k_memory[i].size;
-        } while (++i < m68k_num_memory);
-
-#ifdef DEBUG_INVALID_PTOV
-        if (mm_inv_cnt > 0) {
-                mm_inv_cnt--;
-                printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
-                        paddr, __builtin_return_address(0));
-        }
-#endif
-        return -1;
-}
-EXPORT_SYMBOL(mm_ptov);
-#endif
-
 /* invalidate page in both caches */
 static inline void clear040(unsigned long paddr)
 {
@@ -354,15 +293,3 @@
 }
 EXPORT_SYMBOL(cache_push);

-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-int mm_end_of_chunk (unsigned long addr, int len)
-{
-        int i;
-
-        for (i = 0; i < m68k_num_memory; i++)
-                if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
-                        return 1;
-        return 0;
-}
-EXPORT_SYMBOL(mm_end_of_chunk);
-#endif
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 98ef00547b37..7d571a2b44dd 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -43,6 +43,11 @@
 unsigned long mm_cachebits;
 EXPORT_SYMBOL(mm_cachebits);
 #endif
+
+/* size of memory already mapped in head.S */
+#define INIT_MAPPED_SIZE (4UL<<20)
+
+extern unsigned long availmem;
+
 static pte_t * __init kernel_page_table(void)
 {
         pte_t *ptablep;
@@ -98,19 +103,20 @@
         return last_pgtable;
 }

-static unsigned long __init
-map_chunk (unsigned long addr, long size)
+static void __init map_node(int node)
 {
 #define PTRTREESIZE (256*1024)
 #define ROOTTREESIZE (32*1024*1024)
-        static unsigned long virtaddr = PAGE_OFFSET;
-        unsigned long physaddr;
+        unsigned long physaddr, virtaddr, size;
         pgd_t *pgd_dir;
         pmd_t *pmd_dir;
         pte_t *pte_dir;

-        physaddr = (addr | m68k_supervisor_cachemode |
-                    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+        size = m68k_memory[node].size;
+        physaddr = m68k_memory[node].addr;
+        virtaddr = (unsigned long)phys_to_virt(physaddr);
+        physaddr |= m68k_supervisor_cachemode |
+                    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
         if (CPU_IS_040_OR_060)
                 physaddr |= _PAGE_GLOBAL040;

@@ -190,8 +196,6 @@
 #ifdef DEBUG
         printk("\n");
 #endif
-
-        return virtaddr;
 }

 /*
@@ -200,15 +204,16 @@
  */
 void __init paging_init(void)
 {
-        int chunk;
-        unsigned long mem_avail = 0;
         unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+        unsigned long min_addr, max_addr;
+        unsigned long addr, size, end;
+        int i;

 #ifdef DEBUG
         {
                 extern unsigned long availmem;
-                printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
-                        kernel_pg_dir, availmem, start_mem, end_mem);
+                printk ("start of paging_init (%p, %lx)\n",
+                        kernel_pg_dir, availmem);
         }
 #endif

@@ -222,27 +227,62 @@
                 pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
         }

+        min_addr = m68k_memory[0].addr;
+        max_addr = min_addr + m68k_memory[0].size;
+        for (i = 1; i < m68k_num_memory;) {
+                if (m68k_memory[i].addr < min_addr) {
+                        printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
+                                m68k_memory[i].addr, m68k_memory[i].size);
+                        printk("Fix your bootloader or use a memfile to make use of this area!\n");
+                        m68k_num_memory--;
+                        memmove(m68k_memory + i, m68k_memory + i + 1,
+                                (m68k_num_memory - i) * sizeof(struct mem_info));
+                        continue;
+                }
+                addr = m68k_memory[i].addr + m68k_memory[i].size;
+                if (addr > max_addr)
+                        max_addr = addr;
+                i++;
+        }
+        m68k_memoffset = min_addr - PAGE_OFFSET;
+        m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
+
         module_fixup(NULL, __start_fixup, __stop_fixup);
         flush_icache();

+        high_memory = phys_to_virt(max_addr);
+
+        min_low_pfn = availmem >> PAGE_SHIFT;
+        max_low_pfn = max_addr >> PAGE_SHIFT;
+
+        for (i = 0; i < m68k_num_memory; i++) {
+                addr = m68k_memory[i].addr;
+                end = addr + m68k_memory[i].size;
+                m68k_setup_node(i);
+                availmem = PAGE_ALIGN(availmem);
+                availmem += init_bootmem_node(NODE_DATA(i),
+                                              availmem >> PAGE_SHIFT,
+                                              addr >> PAGE_SHIFT,
+                                              end >> PAGE_SHIFT);
+        }
+
         /*
          * Map the physical memory available into the kernel virtual
-         * address space. It may allocate some memory for page
-         * tables and thus modify availmem.
+         * address space. First initialize the bootmem allocator with
+         * the memory we already mapped, so map_node() has something
+         * to allocate.
          */
+        addr = m68k_memory[0].addr;
+        size = m68k_memory[0].size;
+        free_bootmem_node(NODE_DATA(0), availmem,
+                          min(INIT_MAPPED_SIZE, size) - (availmem - addr));
+        map_node(0);
+        if (size > INIT_MAPPED_SIZE)
+                free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE,
+                                  size - INIT_MAPPED_SIZE);

-        for (chunk = 0; chunk < m68k_num_memory; chunk++) {
-                mem_avail = map_chunk (m68k_memory[chunk].addr,
-                                       m68k_memory[chunk].size);
-
-        }
+        for (i = 1; i < m68k_num_memory; i++)
+                map_node(i);

         flush_tlb_all();

-#ifdef DEBUG
-        printk ("memory available is %ldKB\n", mem_avail >> 10);
-        printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
-                start_mem, end_mem);
-#endif

         /*
          * initialize the bad page table and bad page to point
@@ -259,14 +299,11 @@
 #ifdef DEBUG
         printk ("before free_area_init\n");
 #endif
-        zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ?
-                                (mach_max_dma_address+1) : (unsigned long)high_memory);
-        zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[0];
-
-        zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT;
-        zones_size[ZONE_NORMAL] >>= PAGE_SHIFT;
-
-        free_area_init(zones_size);
+        for (i = 0; i < m68k_num_memory; i++) {
+                zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
+                free_area_init_node(i, pg_data_map + i, zones_size,
+                                    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+        }
 }

 extern char __init_begin, __init_end;
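
For reference, a standalone back-of-the-envelope check (hypothetical memory layout, not taken from the patch) of how paging_init() above sizes the lookup table: m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6 picks a slot size such that the whole physical span min_addr..max_addr covers at most 64 slots, which is why the 65-entry pg_data_table (the extra entry gives m68k_setup_node()'s inclusive end index room when chunks are not slot-aligned) is large enough.

```c
#include <stdio.h>

/* Same result as the kernel's fls(): index of the highest set bit, 1-based. */
static int fls32(unsigned long x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        /* hypothetical layout: chunk 0 at 0x00000000 (16 MB),
         * chunk 1 at 0x06000000 (32 MB) */
        unsigned long min_addr = 0x00000000;
        unsigned long max_addr = 0x06000000 + (32UL << 20);    /* 0x08000000 */
        int shift = fls32(max_addr - min_addr - 1) - 6;        /* 27 - 6 = 21 */

        printf("shift=%d, slot size=%lu KB, slots spanned=%lu\n",
               shift, (1UL << shift) >> 10, (max_addr - min_addr) >> shift);
        return 0;
}
```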