author		Paul Mackerras <paulus@samba.org>	2008-03-25 13:31:46 +1100
committer	Paul Mackerras <paulus@samba.org>	2008-03-25 13:31:46 +1100
commit		16fddf5457d2a7eb5e96ceb016a8f722eca97af6 (patch)
tree		ae3083a50c55f1e1a2c83f475d0e8bb2da8d7196 /arch
parent		5492a7e4cba8e38419d489f0865de0a67c737e8a (diff)
parent		cc7feea39bed2951cc29af3ad642f39a99dfe8d3 (diff)
Merge branch 'linux-2.6' into merge
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c          |  11
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c  |   8
-rw-r--r--  arch/powerpc/sysdev/ipic.c               |   2
-rw-r--r--  arch/sparc64/kernel/ds.c                 |   3
-rw-r--r--  arch/sparc64/kernel/head.S               |   8
-rw-r--r--  arch/sparc64/kernel/process.c            |   3
-rw-r--r--  arch/sparc64/kernel/smp.c                |  17
-rw-r--r--  arch/sparc64/kernel/sys_sparc32.c        |   3
-rw-r--r--  arch/sparc64/kernel/trampoline.S         | 188
-rw-r--r--  arch/sparc64/mm/init.c                   |  38
-rw-r--r--  arch/x86/mm/ioremap.c                    |   6

11 files changed, 112 insertions(+), 175 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 590f1f67c874..a83dfa3cf40c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -351,9 +351,14 @@ static void __init htab_init_page_sizes(void)
 		mmu_vmalloc_psize = MMU_PAGE_64K;
 		if (mmu_linear_psize == MMU_PAGE_4K)
 			mmu_linear_psize = MMU_PAGE_64K;
-		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
-			mmu_io_psize = MMU_PAGE_64K;
-		else
+		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+			/*
+			 * Don't use 64k pages for ioremap on pSeries, since
+			 * that would stop us accessing the HEA ethernet.
+			 */
+			if (!machine_is(pseries))
+				mmu_io_psize = MMU_PAGE_64K;
+		} else
 			mmu_ci_restrictions = 1;
 	}
 #endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index f589999361e0..64ec7d629363 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -52,6 +52,10 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
 	int i, tasknum = -1;
 	struct bcom_task *tsk;
 
+	/* Don't try to do anything if bestcomm init failed */
+	if (!bcom_eng)
+		return NULL;
+
 	/* Get and reserve a task num */
 	spin_lock(&bcom_eng->lock);
 
@@ -484,8 +488,8 @@ mpc52xx_bcom_remove(struct of_device *op)
 }
 
 static struct of_device_id mpc52xx_bcom_of_match[] = {
-	{ .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", },
-	{ .type = "dma-controller", .compatible = "mpc5200-bestcomm", },
+	{ .compatible = "fsl,mpc5200-bestcomm", },
+	{ .compatible = "mpc5200-bestcomm", },
 	{},
 };
 
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index ae0dbf4c1d66..0f2dfb0aaa6a 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -906,7 +906,7 @@ static int __init init_ipic_sysfs(void)
 {
 	int rc;
 
-	if (!primary_ipic->regs)
+	if (!primary_ipic || !primary_ipic->regs)
 		return -ENODEV;
 	printk(KERN_DEBUG "Registering ipic with sysfs...\n");
 
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index bd76482077be..edb74f5a1186 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -972,8 +972,7 @@ static void process_ds_work(void)
 	LIST_HEAD(todo);
 
 	spin_lock_irqsave(&ds_lock, flags);
-	list_splice(&ds_work_list, &todo);
-	INIT_LIST_HEAD(&ds_work_list);
+	list_splice_init(&ds_work_list, &todo);
 	spin_unlock_irqrestore(&ds_lock, flags);
 
 	list_for_each_entry_safe(qp, tmp, &todo, list) {
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 44b105c04dd3..34f8ff57c56b 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -288,8 +288,12 @@ sun4v_chip_type:
 	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
 	mov	-1, %l3
 	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
-	sethi	%hi(8 * 1024 * 1024), %l3
-	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: size (8MB)
+	/* 4MB align the kernel image size. */
+	set	(_end - KERNBASE), %l3
+	set	((4 * 1024 * 1024) - 1), %l4
+	add	%l3, %l4, %l3
+	andn	%l3, %l4, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: roundup(ksize, 4MB)
 	sethi	%hi(KERNBASE), %l3
 	stx	%l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
 	stx	%g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index e116e38b160e..acf8c5250aa9 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -731,9 +731,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index cc454731d879..5a1126b363a4 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -284,14 +284,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 {
 	extern unsigned long sparc64_ttable_tl0;
 	extern unsigned long kern_locked_tte_data;
-	extern int bigkernel;
 	struct hvtramp_descr *hdesc;
 	unsigned long trampoline_ra;
 	struct trap_per_cpu *tb;
 	u64 tte_vaddr, tte_data;
 	unsigned long hv_err;
+	int i;
 
-	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+	hdesc = kzalloc(sizeof(*hdesc) +
+			(sizeof(struct hvtramp_mapping) *
+			 num_kernel_image_mappings - 1),
+			GFP_KERNEL);
 	if (!hdesc) {
 		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
 		       "hvtramp_descr.\n");
@@ -299,7 +302,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 	}
 
 	hdesc->cpu = cpu;
-	hdesc->num_mappings = (bigkernel ? 2 : 1);
+	hdesc->num_mappings = num_kernel_image_mappings;
 
 	tb = &trap_block[cpu];
 	tb->hdesc = hdesc;
@@ -312,13 +315,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 	tte_vaddr = (unsigned long) KERNBASE;
 	tte_data = kern_locked_tte_data;
 
-	hdesc->maps[0].vaddr = tte_vaddr;
-	hdesc->maps[0].tte   = tte_data;
-	if (bigkernel) {
+	for (i = 0; i < hdesc->num_mappings; i++) {
+		hdesc->maps[i].vaddr = tte_vaddr;
+		hdesc->maps[i].tte   = tte_data;
 		tte_vaddr += 0x400000;
 		tte_data  += 0x400000;
-		hdesc->maps[1].vaddr = tte_vaddr;
-		hdesc->maps[1].tte   = tte_data;
 	}
 
 	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index deaba2bd0535..2455fa498876 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -678,9 +678,6 @@ asmlinkage long sparc32_execve(struct pt_regs *regs)
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 4ae2e525d68b..56ff55211341 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -105,7 +105,7 @@ startup_continue:
 	wr	%g2, 0, %tick_cmpr
 
 	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
-	 * We lock 2 consequetive entries if we are 'bigkernel'.
+	 * We lock 'num_kernel_image_mappings' consequetive entries.
 	 */
 	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
@@ -119,6 +119,29 @@ startup_continue:
 	add	%l2, -(192 + 128), %sp
 	flushw
 
+	/* Setup the loop variables:
+	 * %l3: VADDR base
+	 * %l4: TTE base
+	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+	 * %l6: Number of TTE entries to map
+	 * %l7: Highest TTE entry number, we count down
+	 */
+	sethi	%hi(KERNBASE), %l3
+	sethi	%hi(kern_locked_tte_data), %l4
+	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr	%l5
+	sethi	%hi(num_kernel_image_mappings), %l6
+	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add	%l6, 1, %l6
+
+	mov	15, %l7
+	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+	mov	63, %l7
+2:
+
+3:
+	/* Lock into I-MMU */
 	sethi	%hi(call_method), %g2
 	or	%g2, %lo(call_method), %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x00]
@@ -132,63 +155,26 @@ startup_continue:
 	sethi	%hi(prom_mmu_ihandle_cache), %g2
 	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov	63, %g2
-1:
-	stx	%g2, [%sp + 2047 + 128 + 0x38]
-	sethi	%hi(p1275buf), %g2
-	or	%g2, %lo(p1275buf), %g2
-	ldx	[%g2 + 0x08], %o1
-	call	%o1
-	 add	%sp, (2047 + 128), %o0
+
+	/* Each TTE maps 4MB, convert index to offset.  */
+	sllx	%l5, 22, %g1
 
-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, do_dtlb
-	 nop
+	add	%l3, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add	%l4, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE
 
-	sethi	%hi(call_method), %g2
-	or	%g2, %lo(call_method), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x00]
-	mov	5, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x08]
-	mov	1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x10]
-	sethi	%hi(itlb_load), %g2
-	or	%g2, %lo(itlb_load), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x18]
-	sethi	%hi(prom_mmu_ihandle_cache), %g2
-	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE + 0x400000), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi	%hi(0x400000), %g1
-	add	%g2, %g1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov	62, %g2
-1:
+	/* TTE index is highest minus loop index.  */
+	sub	%l7, %l5, %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x38]
+
 	sethi	%hi(p1275buf), %g2
 	or	%g2, %lo(p1275buf), %g2
 	ldx	[%g2 + 0x08], %o1
 	call	%o1
 	 add	%sp, (2047 + 128), %o0
 
-do_dtlb:
+	/* Lock into D-MMU */
 	sethi	%hi(call_method), %g2
 	or	%g2, %lo(call_method), %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x00]
@@ -202,65 +188,30 @@ do_dtlb:
 	sethi	%hi(prom_mmu_ihandle_cache), %g2
 	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+
+	/* Each TTE maps 4MB, convert index to offset.  */
+	sllx	%l5, 22, %g1
 
-	mov	63, %g2
-1:
+	add	%l3, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add	%l4, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE
 
+	/* TTE index is highest minus loop index.  */
+	sub	%l7, %l5, %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x38]
+
 	sethi	%hi(p1275buf), %g2
 	or	%g2, %lo(p1275buf), %g2
 	ldx	[%g2 + 0x08], %o1
 	call	%o1
 	 add	%sp, (2047 + 128), %o0
 
-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, do_unlock
+	add	%l5, 1, %l5
+	cmp	%l5, %l6
+	bne,pt	%xcc, 3b
 	 nop
 
-	sethi	%hi(call_method), %g2
-	or	%g2, %lo(call_method), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x00]
-	mov	5, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x08]
-	mov	1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x10]
-	sethi	%hi(dtlb_load), %g2
-	or	%g2, %lo(dtlb_load), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x18]
-	sethi	%hi(prom_mmu_ihandle_cache), %g2
-	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE + 0x400000), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi	%hi(0x400000), %g1
-	add	%g2, %g1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov	62, %g2
-1:
-
-	stx	%g2, [%sp + 2047 + 128 + 0x38]
-	sethi	%hi(p1275buf), %g2
-	or	%g2, %lo(p1275buf), %g2
-	ldx	[%g2 + 0x08], %o1
-	call	%o1
-	 add	%sp, (2047 + 128), %o0
-
-do_unlock:
 	sethi	%hi(prom_entry_lock), %g2
 	stb	%g0, [%g2 + %lo(prom_entry_lock)]
 	membar	#StoreStore | #StoreLoad
@@ -269,47 +220,36 @@ do_unlock:
 	 nop
 
 niagara_lock_tlb:
+	sethi	%hi(KERNBASE), %l3
+	sethi	%hi(kern_locked_tte_data), %l4
+	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr	%l5
+	sethi	%hi(num_kernel_image_mappings), %l6
+	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add	%l6, 1, %l6
+
+1:
 	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE), %o0
+	sllx	%l5, 22, %g2
+	add	%l3, %g2, %o0
 	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	add	%l4, %g2, %o2
 	mov	HV_MMU_IMMU, %o3
 	ta	HV_FAST_TRAP
 
 	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE), %o0
+	sllx	%l5, 22, %g2
+	add	%l3, %g2, %o0
 	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	add	%l4, %g2, %o2
 	mov	HV_MMU_DMMU, %o3
 	ta	HV_FAST_TRAP
 
-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, after_lock_tlb
+	add	%l5, 1, %l5
+	cmp	%l5, %l6
+	bne,pt	%xcc, 1b
 	 nop
 
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE + 0x400000), %o0
-	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi	%hi(0x400000), %o3
-	add	%o2, %o3, %o2
-	mov	HV_MMU_IMMU, %o3
-	ta	HV_FAST_TRAP
-
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE + 0x400000), %o0
-	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi	%hi(0x400000), %o3
-	add	%o2, %o3, %o2
-	mov	HV_MMU_DMMU, %o3
-	ta	HV_FAST_TRAP
-
 after_lock_tlb:
 	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
 	wr	%g0, 0, %fprs
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5c30416fdac..466fd6cffac9 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -166,7 +166,7 @@ unsigned long sparc64_kern_pri_context __read_mostly;
 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 unsigned long sparc64_kern_sec_context __read_mostly;
 
-int bigkernel = 0;
+int num_kernel_image_mappings;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 atomic_t dcpage_flushes = ATOMIC_INIT(0);
@@ -572,7 +572,7 @@ static unsigned long kern_large_tte(unsigned long paddr);
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
-	int tlb_ent = sparc64_highest_locked_tlbent();
+	int i, tlb_ent = sparc64_highest_locked_tlbent();
 
 	tte_vaddr = (unsigned long) KERNBASE;
 	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -582,27 +582,20 @@ static void __init remap_kernel(void)
 
 	/* Now lock us into the TLBs via Hypervisor or OBP. */
 	if (tlb_type == hypervisor) {
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-		if (bigkernel) {
-			tte_vaddr += 0x400000;
-			tte_data += 0x400000;
+		for (i = 0; i < num_kernel_image_mappings; i++) {
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
 	} else {
-		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-		if (bigkernel) {
-			tlb_ent -= 1;
-			prom_dtlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
-			prom_itlb_load(tlb_ent,
-				       tte_data + 0x400000,
-				       tte_vaddr + 0x400000);
+		for (i = 0; i < num_kernel_image_mappings; i++) {
+			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
-		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
 	}
 	if (tlb_type == cheetah_plus) {
 		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
@@ -1352,12 +1345,9 @@ void __init paging_init(void)
 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
 	real_end = (unsigned long)_end;
-	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
-		bigkernel = 1;
-	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
-		prom_printf("paging_init: Kernel > 8MB, too large.\n");
-		prom_halt();
-	}
+	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+	       num_kernel_image_mappings);
 
 	/* Set kernel pgd to upper alias so physical page computations
 	 * work.
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8fe576baa148..4afaba0ed722 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -106,7 +106,7 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 			       enum ioremap_mode mode)
 {
 	unsigned long pfn, offset, last_addr, vaddr;
@@ -193,13 +193,13 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
-void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
 }
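
The sparc64 changes above replace the two-state 'bigkernel' flag with a count of locked TLB entries, computed in paging_init() as DIV_ROUND_UP(real_end - KERNBASE, 1 << 22). A minimal standalone sketch of that arithmetic, using illustrative values rather than the kernel's real linker-provided KERNBASE and _end:

#include <stdio.h>

/* Round up, as the kernel's DIV_ROUND_UP macro does. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values only; the real KERNBASE and _end
	 * come from the linker script. */
	unsigned long kernbase = 0x400000UL;
	unsigned long real_end = kernbase + 0x96c000UL;	/* ~9.4MB image */

	/* Each locked TLB entry covers one 4MB (1 << 22) region. */
	int num_kernel_image_mappings =
		DIV_ROUND_UP(real_end - kernbase, 1 << 22);

	/* Prints 3: a ~9.4MB image needs three 4MB mappings. */
	printf("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);
	return 0;
}

This is the count that remap_kernel(), ldom_startcpu_cpuid(), and the trampoline.S loops all iterate over; the old code could only express one or two mappings and halted the boot for images past 8MB.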
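The head.S hunk performs the matching round-up in assembly with an add/andn pair: add (4MB - 1), then clear the low 22 bits. The same mask trick in C, again with an illustrative size:

#include <stdio.h>

int main(void)
{
	unsigned long size = 0x96c000UL;	/* illustrative image size */
	unsigned long mask = (4UL * 1024 * 1024) - 1;

	/* Add the mask, then clear the low bits: roundup(size, 4MB). */
	unsigned long rounded = (size + mask) & ~mask;

	printf("%#lx rounds up to %#lx\n", size, rounded);	/* 0xc00000 */
	return 0;
}

The trick only works for power-of-two alignments, which is why the assembly can do it in two instructions (add, andn) with no divide.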
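In the smp.c hunk, ldom_startcpu_cpuid() sizes its hvtramp_descr allocation for a variable number of trailing hvtramp_mapping slots; the '- 1' in the kzalloc() compensates for the one maps[] element the struct apparently already declares in-line. A sketch of that pre-C99 trailing-array idiom, with the struct abbreviated to the fields the diff touches (the kernel's real hvtramp_descr has more members):

#include <stdio.h>
#include <stdlib.h>

struct hvtramp_mapping {
	unsigned long vaddr;
	unsigned long tte;
};

struct hvtramp_descr {
	unsigned int cpu;
	unsigned int num_mappings;
	struct hvtramp_mapping maps[1];	/* grows via over-allocation */
};

int main(void)
{
	int num_kernel_image_mappings = 3;	/* illustrative, see above */
	unsigned long vaddr = 0x400000UL;	/* illustrative KERNBASE */
	unsigned long tte = 0x80000000UL;	/* illustrative TTE data */
	struct hvtramp_descr *hdesc;
	int i;

	/* One mapping lives in the struct itself, so allocate n - 1 more. */
	hdesc = calloc(1, sizeof(*hdesc) +
			  sizeof(struct hvtramp_mapping) *
			  (num_kernel_image_mappings - 1));
	if (!hdesc)
		return 1;

	hdesc->num_mappings = num_kernel_image_mappings;

	/* Each successive mapping covers the next 4MB of the image. */
	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = vaddr;
		hdesc->maps[i].tte = tte;
		vaddr += 0x400000;
		tte += 0x400000;
	}

	printf("maps[2].vaddr = %#lx\n", hdesc->maps[2].vaddr);
	free(hdesc);
	return 0;
}

Modern kernel code would use a C99 flexible array member (maps[]) sized with struct_size(); the '[1] plus n - 1' form sketched here matches the sizing the 2008 diff implies.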