Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile             |   1
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c  |  10
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c   |  36
-rw-r--r--  arch/blackfin/kernel/ipipe.c              |   7
-rw-r--r--  arch/blackfin/kernel/irqchip.c            | 114
-rw-r--r--  arch/blackfin/kernel/kgdb.c               | 297
-rw-r--r--  arch/blackfin/kernel/mcount.S             |  70
-rw-r--r--  arch/blackfin/kernel/process.c            | 151
-rw-r--r--  arch/blackfin/kernel/setup.c              | 122
-rw-r--r--  arch/blackfin/kernel/traps.c              |  60
10 files changed, 406 insertions, 462 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 3731088e181b..141d9281e4b0 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 CFLAGS_REMOVE_ftrace.o = -pg
 
 obj-$(CONFIG_IPIPE)                  += ipipe.o
-obj-$(CONFIG_IPIPE_TRACE_MCOUNT)     += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS)          += gptimers.o
 obj-$(CONFIG_CPLB_INFO)              += cplbinfo.o
 obj-$(CONFIG_MODULES)                += module.o
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index c006a44527bf..36193eed9a1f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -46,13 +46,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 
 	printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
 
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
 	i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
 #endif
 
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
 	d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
+#ifdef CONFIG_BFIN_EXTMEM_WRITETROUGH
 	d_cache |= CPLB_L1_AOW | CPLB_WT;
 #endif
 #endif
@@ -91,9 +91,9 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 	/* Cover L2 memory */
 #if L2_LENGTH > 0
 	dcplb_tbl[cpu][i_d].addr = L2_START;
-	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
 	icplb_tbl[cpu][i_i].addr = L2_START;
-	icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+	icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
 #endif
 
 	first_mask_dcplb = i_d;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 784923e52a9a..bcdfe9b0b71f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -150,15 +150,19 @@ static noinline int dcplb_miss(unsigned int cpu)
 	nr_dcplb_miss[cpu]++;
 
 	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
 	if (bfin_addr_dcacheable(addr)) {
 		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BFIN_WT
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
 		d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+# endif
 	}
 #endif
-	if (addr >= physical_mem_end) {
+
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		d_data = L2_DMEMORY;
+	} else if (addr >= physical_mem_end) {
 		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
 		    && (status & FAULT_USERSUPV)) {
 			addr &= ~0x3fffff;
@@ -235,7 +239,7 @@ static noinline int icplb_miss(unsigned int cpu)
 
 	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
 
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
 	/*
 	 * Normal RAM, and possibly the reserved memory area, are
 	 * cacheable.
 	 */
@@ -245,7 +249,10 @@ static noinline int icplb_miss(unsigned int cpu)
 		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
 #endif
 
-	if (addr >= physical_mem_end) {
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		i_data = L2_IMEMORY;
+	} else if (addr >= physical_mem_end) {
 		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
 		    && (status & FAULT_USERSUPV)) {
 			addr &= ~(1 * 1024 * 1024 - 1);
@@ -365,13 +372,18 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
 	local_irq_save_hw(flags);
 	current_rwx_mask[cpu] = masks;
 
-	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
-	d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
-	d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		d_data = L2_DMEMORY;
+	} else {
+		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+		d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+		d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
 #endif
+	}
 
 	disable_dcplb();
 	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index d8cde1fc5cb9..b8d22034b9a6 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
 
 atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
 
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
 EXPORT_SYMBOL(__ipipe_irq_lvmask);
 
 static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -342,8 +342,3 @@ void ___ipipe_sync_pipeline(unsigned long syncmask)
 }
 
 EXPORT_SYMBOL(show_stack);
-
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 6e31e935bb31..4b5fd36187d9 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,38 +38,15 @@
 #include <asm/pda.h>
 
 static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
 void ack_bad_irq(unsigned int irq)
 {
 	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
 
-static struct irq_chip bad_chip = {
-	.ack = dummy_mask_unmask_irq,
-	.mask = dummy_mask_unmask_irq,
-	.unmask = dummy_mask_unmask_irq,
-};
-
-static int bad_stats;
 static struct irq_desc bad_irq_desc = {
-	.status = IRQ_DISABLED,
-	.chip = &bad_chip,
 	.handle_irq = handle_bad_irq,
-	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-	.kstat_irqs = &bad_stats,
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -77,6 +54,7 @@ static struct irq_desc bad_irq_desc = {
 #error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
 #endif
 
+#ifdef CONFIG_PROC_FS
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -108,50 +86,29 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	return 0;
 }
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
-#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
-	unsigned short pending, other_ints;
 #endif
-	old_regs = set_irq_regs(regs);
-	/*
-	 * Some hardware gives randomly wrong interrupts. Rather
-	 * than crashing, do something sensible.
-	 */
-	if (irq >= NR_IRQS)
-		desc = &bad_irq_desc;
-
-	irq_enter();
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
 	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
-	{
-		long sp;
-
-		sp = __get_SP() & (THREAD_SIZE-1);
+	long sp = __get_SP() & (THREAD_SIZE - 1);
 
-		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-			dump_stack();
-			printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
-					" only %ld bytes free\n",
-				__func__, irq, sp - sizeof(struct thread_info));
-		}
+	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+		dump_stack();
+		pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+			irq, sp - sizeof(struct thread_info));
 	}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
 #endif
-	generic_handle_irq(irq);
 
 #ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+	unsigned short pending, other_ints;
+
 	/*
 	 * If we're the only interrupt running (ignoring IRQ15 which
 	 * is for syscalls), lower our priority to IRQ14 so that
@@ -165,7 +122,38 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	other_ints = pending & (pending - 1);
 	if (other_ints == 0)
 		lower_to_irq14();
-#endif /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	check_stack_overflow(irq);
+
+	/*
+	 * Some hardware gives randomly wrong interrupts. Rather
+	 * than crashing, do something sensible.
+	 */
+	if (irq >= NR_IRQS)
+		handle_bad_irq(irq, &bad_irq_desc);
+	else
+		generic_handle_irq(irq);
+
+	maybe_lower_to_irq14();
+
 	irq_exit();
 
 	set_irq_regs(old_regs);
@@ -173,14 +161,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	spin_lock_init(&irq_controller_lock);
-	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
-		*desc = bad_irq_desc;
-	}
-
 	init_arch_irq();
 
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index da28f796ad78..cce79d05b90b 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,15 +34,6 @@ int gdb_bfin_vector = -1;
 #error change the definition of slavecpulocks
 #endif
 
-#define IN_MEM(addr, size, l1_addr, l1_size) \
-({ \
-	unsigned long __addr = (unsigned long)(addr); \
-	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
-})
-#define ASYNC_BANK_SIZE \
-	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
-	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
-
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
 	gdb_regs[BFIN_R0] = regs->r0;
@@ -463,41 +454,88 @@ static int hex(char ch)
 
 static int validate_memory_access_address(unsigned long addr, int size)
 {
-	int cpu = raw_smp_processor_id();
-
-	if (size < 0)
+	if (size < 0 || addr == 0)
 		return -EFAULT;
-	if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
-		return 0;
-	if (addr >= SYSMMR_BASE)
-		return 0;
-	if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
-		return 0;
-	if (cpu == 0) {
-		if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
-			return 0;
-#ifdef CONFIG_SMP
-	} else if (cpu == 1) {
-		if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+	return bfin_mem_access_type(addr, size);
+}
+
+static int bfin_probe_kernel_read(char *dst, char *src, int size)
+{
+	unsigned long lsrc = (unsigned long)src;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(lsrc, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (lsrc >= SYSMMR_BASE) {
+		if (size == 2 && lsrc % 2 == 0) {
+			u16 mmr = bfin_read16(src);
+			memcpy(dst, &mmr, sizeof(mmr));
 			return 0;
-		if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+		} else if (size == 4 && lsrc % 4 == 0) {
+			u32 mmr = bfin_read32(src);
+			memcpy(dst, &mmr, sizeof(mmr));
 			return 0;
-		if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return probe_kernel_read(dst, src, size);
+		/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
+	}
+
+	return -EFAULT;
+}
+
+static int bfin_probe_kernel_write(char *dst, char *src, int size)
+{
+	unsigned long ldst = (unsigned long)dst;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(ldst, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (ldst >= SYSMMR_BASE) {
+		if (size == 2 && ldst % 2 == 0) {
+			u16 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write16(dst, mmr);
 			return 0;
-		if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+		} else if (size == 4 && ldst % 4 == 0) {
+			u32 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write32(dst, mmr);
 			return 0;
-#endif
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return probe_kernel_write(dst, src, size);
+		/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
 	}
-	if (IN_MEM(addr, size, L2_START, L2_LENGTH))
-		return 0;
-
 	return -EFAULT;
 }
 
@@ -509,14 +547,6 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 {
 	char *tmp;
 	int err;
-	unsigned char *pch;
-	unsigned short mmr16;
-	unsigned long mmr32;
-	int cpu = raw_smp_processor_id();
-
-	err = validate_memory_access_address((unsigned long)mem, count);
-	if (err)
-		return err;
 
 	/*
 	 * We use the upper half of buf as an intermediate buffer for the
 	 * raw memory copy.
@@ -524,44 +554,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 	 */
 	tmp = buf + count;
 
-	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
-		switch (count) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = *(unsigned short *)mem;
-				pch = (unsigned char *)&mmr16;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				tmp -= 2;
-			} else
-				err = -EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = *(unsigned long *)mem;
-				pch = (unsigned char *)&mmr32;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				tmp -= 4;
-			} else
-				err = -EFAULT;
-			break;
-		default:
-			err = -EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-		) {
-		/* access L1 instruction SRAM*/
-		if (dma_memcpy(tmp, mem, count) == NULL)
-			err = -EFAULT;
-	} else
-		err = probe_kernel_read(tmp, mem, count);
-
+	err = bfin_probe_kernel_read(tmp, mem, count);
 	if (!err) {
 		while (count > 0) {
 			buf = pack_hex_byte(buf, *tmp);
@@ -582,13 +575,8 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
  */
 int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-	char *tmp_old;
-	char *tmp_new;
-	unsigned short *mmr16;
-	unsigned long *mmr32;
-	int err;
+	char *tmp_old, *tmp_new;
 	int size;
-	int cpu = raw_smp_processor_id();
 
 	tmp_old = tmp_new = buf;
 
@@ -601,41 +589,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
 		tmp_old++;
 	}
 
-	err = validate_memory_access_address((unsigned long)mem, size);
-	if (err)
-		return err;
-
-	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
-		switch (size) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = (unsigned short *)buf;
-				*(unsigned short *)mem = *mmr16;
-			} else
-				err = -EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = (unsigned long *)buf;
-				*(unsigned long *)mem = *mmr32;
-			} else
-				err = -EFAULT;
-			break;
-		default:
-			err = -EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-		) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(mem, buf, size) == NULL)
-			err = -EFAULT;
-	} else
-		err = probe_kernel_write(mem, buf, size);
-
-	return err;
+	return bfin_probe_kernel_write(mem, buf, count);
 }
 
 /*
@@ -645,16 +599,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
  */
 int kgdb_hex2mem(char *buf, char *mem, int count)
 {
-	char *tmp_raw;
-	char *tmp_hex;
-	unsigned short *mmr16;
-	unsigned long *mmr32;
-	int err;
-	int cpu = raw_smp_processor_id();
-
-	err = validate_memory_access_address((unsigned long)mem, count);
-	if (err)
-		return err;
+	char *tmp_raw, *tmp_hex;
 
 	/*
 	 * We use the upper half of buf as an intermediate buffer for the
@@ -669,39 +614,18 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
 		*tmp_raw |= hex(*tmp_hex--) << 4;
 	}
 
-	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
-		switch (count) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = (unsigned short *)tmp_raw;
-				*(unsigned short *)mem = *mmr16;
-			} else
-				err = -EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = (unsigned long *)tmp_raw;
-				*(unsigned long *)mem = *mmr32;
-			} else
-				err = -EFAULT;
-			break;
-		default:
-			err = -EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-		) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(mem, tmp_raw, count) == NULL)
-			err = -EFAULT;
-	} else
-		err = probe_kernel_write(mem, tmp_raw, count);
-
-	return err;
+	return bfin_probe_kernel_write(mem, tmp_raw, count);
 }
 
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+	unsigned long __addr = (unsigned long)(addr); \
+	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
 int kgdb_validate_break_address(unsigned long addr)
 {
 	int cpu = raw_smp_processor_id();
@@ -724,46 +648,17 @@ int kgdb_validate_break_address(unsigned long addr)
 
 int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
 {
-	int err;
-	int cpu = raw_smp_processor_id();
-
-	if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-	    || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	    ) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
-			== NULL)
-			return -EFAULT;
-
-		if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
-			BREAK_INSTR_SIZE) == NULL)
-			return -EFAULT;
-
-		return 0;
-	} else {
-		err = probe_kernel_read(saved_instr, (char *)addr,
-			BREAK_INSTR_SIZE);
-		if (err)
-			return err;
-
-		return probe_kernel_write((char *)addr,
-			arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
-	}
+	int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
+		BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+		BREAK_INSTR_SIZE);
 }
 
 int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
 {
-	if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
-			return -EFAULT;
-
-		return 0;
-	} else
-		return probe_kernel_write((char *)addr,
-			(char *)bundle, BREAK_INSTR_SIZE);
+	return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
 }
 
 int kgdb_arch_init(void)
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
deleted file mode 100644
index edcfb3865f46..000000000000
--- a/arch/blackfin/kernel/mcount.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * linux/arch/blackfin/mcount.S
- *
- * Copyright (C) 2006 Analog Devices Inc.
- *
- * 2007/04/12 Save index, length, modify and base registers. --rpm
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-.align 4	/* just in case */
-
-ENTRY(__mcount)
-	[--sp] = i0;
-	[--sp] = i1;
-	[--sp] = i2;
-	[--sp] = i3;
-	[--sp] = l0;
-	[--sp] = l1;
-	[--sp] = l2;
-	[--sp] = l3;
-	[--sp] = m0;
-	[--sp] = m1;
-	[--sp] = m2;
-	[--sp] = m3;
-	[--sp] = b0;
-	[--sp] = b1;
-	[--sp] = b2;
-	[--sp] = b3;
-	[--sp] = ( r7:0, p5:0 );
-	[--sp] = ASTAT;
-
-	p1.L = _ipipe_trace_enable;
-	p1.H = _ipipe_trace_enable;
-	r7 = [p1];
-	CC = r7 == 0;
-	if CC jump out;
-	link 0x10;
-	r0 = 0x0;
-	[sp + 0xc] = r0; /* v */
-	r0 = 0x0;	/* type: IPIPE_TRACE_FN */
-	r1 = rets;
-	p0 = [fp];	/* p0: Prior FP */
-	r2 = [p0 + 4];	/* r2: Prior RETS */
-	call ___ipipe_trace;
-	unlink;
-out:
-	ASTAT = [sp++];
-	( r7:0, p5:0 ) = [sp++];
-	b3 = [sp++];
-	b2 = [sp++];
-	b1 = [sp++];
-	b0 = [sp++];
-	m3 = [sp++];
-	m2 = [sp++];
-	m1 = [sp++];
-	m0 = [sp++];
-	l3 = [sp++];
-	l2 = [sp++];
-	l1 = [sp++];
-	l0 = [sp++];
-	i3 = [sp++];
-	i2 = [sp++];
-	i1 = [sp++];
-	i0 = [sp++];
-	rts;
-ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e1d86e456f6..79cad0ac5892 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -344,6 +344,87 @@ void finish_atomic_sections (struct pt_regs *regs)
 	}
 }
 
+static inline
+int in_mem(unsigned long addr, unsigned long size,
+	unsigned long start, unsigned long end)
+{
+	return addr >= start && addr + size <= end;
+}
+static inline
+int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
+	unsigned long const_addr, unsigned long const_size)
+{
+	return const_size &&
+		in_mem(addr, size, const_addr + off, const_addr + const_size);
+}
+static inline
+int in_mem_const(unsigned long addr, unsigned long size,
+	unsigned long const_addr, unsigned long const_size)
+{
+	return in_mem_const_off(addr, 0, size, const_addr, const_size);
+}
+#define IN_ASYNC(bnum, bctlnum) \
+({ \
+	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
+	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
+	BFIN_MEM_ACCESS_CORE; \
+})
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size)
+{
+	int cpu = raw_smp_processor_id();
+
+	/* Check that things do not wrap around */
+	if (addr > ULONG_MAX - size)
+		return -EFAULT;
+
+	if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
+		return BFIN_MEM_ACCESS_CORE;
+
+	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+	if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#ifdef COREB_L1_CODE_START
+	if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#endif
+	if (in_mem_const(addr, size, L2_START, L2_LENGTH))
+		return BFIN_MEM_ACCESS_CORE;
+
+	if (addr >= SYSMMR_BASE)
+		return BFIN_MEM_ACCESS_CORE_ONLY;
+
+	/* We can't read EBIU banks that aren't enabled or we end up hanging
+	 * on the access to the async space.
+	 */
+	if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
+		return IN_ASYNC(0, 0);
+	if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
+		return IN_ASYNC(1, 0);
+	if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
+		return IN_ASYNC(2, 1);
+	if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
+		return IN_ASYNC(3, 1);
+
+	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+		return BFIN_MEM_ACCESS_CORE;
+	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+		return BFIN_MEM_ACCESS_DMA;
+
+	return -EFAULT;
+}
+
 #if defined(CONFIG_ACCESS_CHECK)
 #ifdef CONFIG_ACCESS_OK_L1
 __attribute__((l1_text))
@@ -353,51 +434,61 @@ int _access_ok(unsigned long addr, unsigned long size)
 {
 	if (size == 0)
 		return 1;
-	if (addr > (addr + size))
+	/* Check that things do not wrap around */
+	if (addr > ULONG_MAX - size)
 		return 0;
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return 1;
 #ifdef CONFIG_MTD_UCLINUX
-	if (addr >= memory_start && (addr + size) <= memory_end)
-		return 1;
-	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
+	if (1)
+#else
+	if (0)
+#endif
+	{
+		if (in_mem(addr, size, memory_start, memory_end))
+			return 1;
+		if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
+			return 1;
+# ifndef CONFIG_ROMFS_ON_MTD
+		if (0)
+# endif
+		/* For XIP, allow user space to use pointers within the ROMFS. */
+		if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
+			return 1;
+	} else {
+		if (in_mem(addr, size, memory_start, physical_mem_end))
+			return 1;
+	}
+
+	if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
 		return 1;
-#ifdef CONFIG_ROMFS_ON_MTD
-	/* For XIP, allow user space to use pointers within the ROMFS. */
-	if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
 		return 1;
-#endif
-#else
-	if (addr >= memory_start && (addr + size) <= physical_mem_end)
+	if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
 		return 1;
-#endif
-	if (addr >= (unsigned long)__init_begin &&
-	    addr + size <= (unsigned long)__init_end)
+	if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
 		return 1;
-	if (addr >= get_l1_scratch_start()
-	    && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
+	if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
 		return 1;
-#if L1_CODE_LENGTH != 0
-	if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
-	    && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
+#ifdef COREB_L1_CODE_START
+	if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
 		return 1;
-#endif
-#if L1_DATA_A_LENGTH != 0
-	if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
-	    && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
+	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 		return 1;
-#endif
-#if L1_DATA_B_LENGTH != 0
-	if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
-	    && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
+	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
 		return 1;
-#endif
-#if L2_LENGTH != 0
-	if (addr >= L2_START + (_ebss_l2 - _stext_l2)
-	    && addr + size <= L2_START + L2_LENGTH)
+	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
 		return 1;
 #endif
+	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
+		return 1;
+
+	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+		return 1;
+	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+		return 1;
+
 	return 0;
 }
 EXPORT_SYMBOL(_access_ok);
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 6454babdfaff..298f023bcc09 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -117,15 +117,49 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
 	 */
 #ifdef CONFIG_BFIN_ICACHE
 	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+	printk(KERN_INFO " External memory:"
+# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+	       " cacheable"
+# else
+	       " uncacheable"
+# endif
+	       " in instruction cache\n");
+	if (L2_LENGTH)
+		printk(KERN_INFO " L2 SRAM :"
+# ifdef CONFIG_BFIN_L2_ICACHEABLE
+		       " cacheable"
+# else
+		       " uncacheable"
+# endif
+		       " in instruction cache\n");
+
+#else
+	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
 #endif
+
 #ifdef CONFIG_BFIN_DCACHE
-	printk(KERN_INFO "Data Cache Enabled for CPU%u"
-# if defined CONFIG_BFIN_WB
-		" (write-back)"
-# elif defined CONFIG_BFIN_WT
-		" (write-through)"
+	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
+	printk(KERN_INFO " External memory:"
+# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
+	       " cacheable (write-back)"
+# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
+	       " cacheable (write-through)"
+# else
+	       " uncacheable"
 # endif
-		"\n", cpu);
+	       " in data cache\n");
+	if (L2_LENGTH)
+		printk(KERN_INFO " L2 SRAM :"
+# if defined CONFIG_BFIN_L2_WRITEBACK
+		       " cacheable (write-back)"
+# elif defined CONFIG_BFIN_L2_WRITETHROUGH
+		       " cacheable (write-through)"
+# else
+		       " uncacheable"
+# endif
+		       " in data cache\n");
+#else
+	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
 #endif
 }
 
@@ -443,9 +477,11 @@ static __init void parse_cmdline_early(char *cmdline_p)
 		} else if (!memcmp(to, "clkin_hz=", 9)) {
 			to += 9;
 			early_init_clkin_hz(to);
+#ifdef CONFIG_EARLY_PRINTK
 		} else if (!memcmp(to, "earlyprintk=", 12)) {
 			to += 12;
 			setup_early_printk(to);
+#endif
 		} else if (!memcmp(to, "memmap=", 7)) {
 			to += 7;
 			parse_memmap(to);
@@ -516,7 +552,7 @@ static __init void memory_setup(void)
 	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
 		mtd_size = PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
 
-# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
 	/* Due to a Hardware Anomaly we need to limit the size of usable
 	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
 	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -544,7 +580,7 @@ static __init void memory_setup(void)
 	dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
 #endif	/* CONFIG_MTD_UCLINUX */
 
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
 	/* Due to a Hardware Anomaly we need to limit the size of usable
 	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
 	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -764,6 +800,11 @@ void __init setup_arch(char **cmdline_p)
 {
 	unsigned long sclk, cclk;
 
+	/* Check to make sure we are running on the right processor */
+	if (unlikely(CPUID != bfin_cpuid()))
+		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+			CPU, bfin_cpuid(), bfin_revid());
+
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
@@ -778,14 +819,17 @@ void __init setup_arch(char **cmdline_p)
 	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
 
-	/* setup memory defaults from the user config */
-	physical_mem_end = 0;
-	_ramend = get_mem_size() * 1024 * 1024;
-
 	memset(&bfin_memmap, 0, sizeof(bfin_memmap));
 
+	/* If the user does not specify things on the command line, use
+	 * what the bootloader set things up as
+	 */
+	physical_mem_end = 0;
 	parse_cmdline_early(&command_line[0]);
 
+	if (_ramend == 0)
+		_ramend = get_mem_size() * 1024 * 1024;
+
 	if (physical_mem_end == 0)
 		physical_mem_end = _ramend;
 
@@ -837,7 +881,8 @@ void __init setup_arch(char **cmdline_p)
 	    defined(CONFIG_BF538) || defined(CONFIG_BF539)
 	_bfin_swrst = bfin_read_SWRST();
 #else
-	_bfin_swrst = bfin_read_SYSCR();
+	/* Clear boot mode field */
+	_bfin_swrst = bfin_read_SYSCR() & ~0xf;
 #endif
 
 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -875,10 +920,7 @@ void __init setup_arch(char **cmdline_p)
 	else
 		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n",
 			CPU, bfin_compiled_revid());
-	if (unlikely(CPUID != bfin_cpuid()))
-		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
-			CPU, bfin_cpuid(), bfin_revid());
-	else {
+	if (likely(CPUID == bfin_cpuid())) {
 		if (bfin_revid() != bfin_compiled_revid()) {
 			if (bfin_compiled_revid() == -1)
 				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
@@ -1157,16 +1199,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		icache_size = 0;
 
 	seq_printf(m, "cache size\t: %d KB(L1 icache) "
-		"%d KB(L1 dcache%s) %d KB(L2 cache)\n",
-		icache_size, dcache_size,
-#if defined CONFIG_BFIN_WB
-		"-wb"
-#elif defined CONFIG_BFIN_WT
-		"-wt"
-#endif
-		"", 0);
-
+		"%d KB(L1 dcache) %d KB(L2 cache)\n",
+		icache_size, dcache_size, 0);
 	seq_printf(m, "%s\n", cache);
+	seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
+		"cacheable"
+#else
+		"uncacheable"
+#endif
+		" in instruction cache\n");
+	seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
+		"cacheable (write-back)"
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
+		"cacheable (write-through)"
+#else
+		"uncacheable"
+#endif
+		" in data cache\n");
 
 	if (icache_size)
 		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
@@ -1239,8 +1290,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (cpu_num != num_possible_cpus() - 1)
 		return 0;
 
-	if (L2_LENGTH)
+	if (L2_LENGTH) {
 		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+		seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_ICACHEABLE)
+			"cacheable"
+#else
+			"uncacheable"
+#endif
+			" in instruction cache\n");
+		seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_WRITEBACK)
+			"cacheable (write-back)"
+#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+			"cacheable (write-through)"
+#else
+			"uncacheable"
+#endif
+			" in data cache\n");
+	}
 	seq_printf(m, "board name\t: %s\n", bfin_board_name);
 	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
 		physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d279552fe9b0..8eeb457ce5d5 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -37,6 +37,7 @@
 #include <asm/traps.h>
 #include <asm/cacheflush.h>
 #include <asm/cplb.h>
+#include <asm/dma.h>
 #include <asm/blackfin.h>
 #include <asm/irq_handler.h>
 #include <linux/irq.h>
@@ -636,57 +637,30 @@ asmlinkage void trap_c(struct pt_regs *fp)
  */
 static bool get_instruction(unsigned short *val, unsigned short *address)
 {
-
-	unsigned long addr;
-
-	addr = (unsigned long)address;
+	unsigned long addr = (unsigned long)address;
 
 	/* Check for odd addresses */
 	if (addr & 0x1)
 		return false;
 
-	/* Check that things do not wrap around */
-	if (addr > (addr + 2))
+	/* MMR region will never have instructions */
+	if (addr >= SYSMMR_BASE)
 		return false;
 
-	/*
-	 * Since we are in exception context, we need to do a little address checking
-	 * We need to make sure we are only accessing valid memory, and
-	 * we don't read something in the async space that can hang forever
-	 */
-	if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#if L2_LENGTH != 0
-	    (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
-#endif
-	    (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
-#if L1_DATA_A_LENGTH != 0
-	    (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) ||
-#endif
-#if L1_DATA_B_LENGTH != 0
-	    (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) ||
-#endif
-	    (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) ||
-	    (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) &&
-	     addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) &&
-	     addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) &&
-	     addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) &&
-	     addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) {
-		*val = *address;
-		return true;
+	switch (bfin_mem_access_type(addr, 2)) {
+	case BFIN_MEM_ACCESS_CORE:
+	case BFIN_MEM_ACCESS_CORE_ONLY:
+		*val = *address;
+		return true;
+	case BFIN_MEM_ACCESS_DMA:
+		dma_memcpy(val, address, 2);
+		return true;
+	case BFIN_MEM_ACCESS_ITEST:
+		isram_memcpy(val, address, 2);
+		return true;
+	default: /* invalid access */
+		return false;
 	}
-
-#if L1_CODE_LENGTH != 0
-	if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) {
-		isram_memcpy(val, address, 2);
-		return true;
-	}
-#endif
-
-
-	return false;
 }
 
 /*
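
For reference, the sketch below condenses the access pattern that the new bfin_mem_access_type() helper (added to process.c above) is meant to support: classify an address range once, then pick the matching copy mechanism, as the reworked kgdb.c and traps.c code does. It is not part of the commit; BFIN_MEM_ACCESS_*, dma_memcpy() and isram_memcpy() are the kernel-internal helpers already used in the diff, the function name is illustrative only, and the usual #includes are omitted.

static int bfin_copy_from(void *dst, void *src, size_t size)
{
	/* Classify the source range once, then dispatch on the result */
	switch (bfin_mem_access_type((unsigned long)src, size)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		memcpy(dst, src, size);		/* plain core load/store is safe */
		return 0;
	case BFIN_MEM_ACCESS_DMA:
		/* e.g. on-chip memory of the other core: go through DMA */
		return dma_memcpy(dst, src, size) ? 0 : -EFAULT;
	case BFIN_MEM_ACCESS_ITEST:
		/* L1 instruction SRAM: use the ITEST-based isram helpers */
		return isram_memcpy(dst, src, size) ? 0 : -EFAULT;
	default:
		return -EFAULT;			/* unmapped or disabled async bank */
	}
}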