Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile                |   1
-rw-r--r--  arch/blackfin/kernel/asm-offsets.c           |   7
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c          |  14
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c             |   1
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/Makefile       |   2
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cacheinit.c    |  69
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c      |  63
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/Makefile     |   2
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cacheinit.c  |  69
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c   |  11
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.c    |  35
-rw-r--r--  arch/blackfin/kernel/early_printk.c          |  74
-rw-r--r--  arch/blackfin/kernel/entry.S                 |  24
-rw-r--r--  arch/blackfin/kernel/ftrace-entry.S          |  23
-rw-r--r--  arch/blackfin/kernel/ftrace.c                |   2
-rw-r--r--  arch/blackfin/kernel/ipipe.c                 |  83
-rw-r--r--  arch/blackfin/kernel/kgdb_test.c             |   2
-rw-r--r--  arch/blackfin/kernel/module.c                | 266
-rw-r--r--  arch/blackfin/kernel/process.c               |  10
-rw-r--r--  arch/blackfin/kernel/ptrace.c                | 155
-rw-r--r--  arch/blackfin/kernel/setup.c                 | 120
-rw-r--r--  arch/blackfin/kernel/shadow_console.c        | 113
-rw-r--r--  arch/blackfin/kernel/time-ts.c               |   4
-rw-r--r--  arch/blackfin/kernel/traps.c                 |  88
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S           |   9
25 files changed, 618 insertions(+), 629 deletions(-)
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 141d9281e4b0..a8ddbc8ed5af 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
# the kgdb test puts code into L2 and without linker
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index b5df9459d6d5..f05d1b99b0ef 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -145,6 +145,7 @@ int main(void)
DEFINE(PDA_EXBUF, offsetof(struct blackfin_pda, ex_buf));
DEFINE(PDA_EXIMASK, offsetof(struct blackfin_pda, ex_imask));
DEFINE(PDA_EXSTACK, offsetof(struct blackfin_pda, ex_stack));
+ DEFINE(PDA_EXIPEND, offsetof(struct blackfin_pda, ex_ipend));
#ifdef ANOMALY_05000261
DEFINE(PDA_LFRETX, offsetof(struct blackfin_pda, last_cplb_fault_retx));
#endif
@@ -152,6 +153,12 @@ int main(void)
DEFINE(PDA_ICPLB, offsetof(struct blackfin_pda, icplb_fault_addr));
DEFINE(PDA_RETX, offsetof(struct blackfin_pda, retx));
DEFINE(PDA_SEQSTAT, offsetof(struct blackfin_pda, seqstat));
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ DEFINE(PDA_DF_DCPLB, offsetof(struct blackfin_pda, dcplb_doublefault_addr));
+ DEFINE(PDA_DF_ICPLB, offsetof(struct blackfin_pda, icplb_doublefault_addr));
+ DEFINE(PDA_DF_SEQSTAT, offsetof(struct blackfin_pda, seqstat_doublefault));
+ DEFINE(PDA_DF_RETX, offsetof(struct blackfin_pda, retx_doublefault));
+#endif
#ifdef CONFIG_SMP
/* Inter-core lock (in L2 SRAM) */
DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 9f9b82816652..384868dedac3 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -19,6 +19,7 @@
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
+#include <asm/early_printk.h>
/*
* To make sure we work around 05000119 - we always check DMA_DONE bit,
@@ -146,8 +147,8 @@ EXPORT_SYMBOL(request_dma);
int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
{
- BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
- && channel < MAX_DMA_CHANNELS));
+ BUG_ON(channel >= MAX_DMA_CHANNELS ||
+ dma_ch[channel].chan_status == DMA_CHANNEL_FREE);
if (callback != NULL) {
int ret;
@@ -181,8 +182,8 @@ static void clear_dma_buffer(unsigned int channel)
void free_dma(unsigned int channel)
{
pr_debug("freedma() : BEGIN \n");
- BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
- && channel < MAX_DMA_CHANNELS));
+ BUG_ON(channel >= MAX_DMA_CHANNELS ||
+ dma_ch[channel].chan_status == DMA_CHANNEL_FREE);
/* Halt the DMA */
disable_dma(channel);
@@ -236,6 +237,7 @@ void blackfin_dma_resume(void)
*/
void __init blackfin_dma_early_init(void)
{
+ early_shadow_stamp();
bfin_write_MDMA_S0_CONFIG(0);
bfin_write_MDMA_S1_CONFIG(0);
}
@@ -246,6 +248,8 @@ void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
unsigned long src = (unsigned long)psrc;
struct dma_register *dst_ch, *src_ch;
+ early_shadow_stamp();
+
/* We assume that everything is 4 byte aligned, so include
* a basic sanity check
*/
@@ -300,6 +304,8 @@ void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
void __init early_dma_memcpy_done(void)
{
+ early_shadow_stamp();
+
while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
(bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
continue;
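
Note on the two BUG_ON() changes above: the old condition indexed dma_ch[channel] before verifying the channel number, so an out-of-range channel already misbehaved inside the check itself. The rewritten condition tests the bound first and relies on || short-circuiting. A minimal standalone sketch of that ordering, using plain assert() and hypothetical names rather than the kernel API:

#include <assert.h>
#include <stdio.h>

#define MAX_DMA_CHANNELS 16

enum chan_status { DMA_CHANNEL_FREE, DMA_CHANNEL_REQUESTED };

static struct { enum chan_status chan_status; } dma_ch[MAX_DMA_CHANNELS];

static void check_channel(unsigned int channel)
{
	/* Bounds test first; || short-circuits, so dma_ch[] is only indexed
	 * once the index is known to be in range. */
	assert(!(channel >= MAX_DMA_CHANNELS ||
		 dma_ch[channel].chan_status == DMA_CHANNEL_FREE));
}

int main(void)
{
	dma_ch[3].chan_status = DMA_CHANNEL_REQUESTED;
	check_channel(3);
	printf("channel 3 is requested and in range\n");
	return 0;
}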
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 6b9446271371..fc4681c0170e 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -722,7 +722,6 @@ void bfin_gpio_pm_hibernate_suspend(void)
gpio_bank_saved[bank].fer = gpio_array[bank]->port_fer;
gpio_bank_saved[bank].mux = gpio_array[bank]->port_mux;
gpio_bank_saved[bank].data = gpio_array[bank]->data;
- gpio_bank_saved[bank].data = gpio_array[bank]->data;
gpio_bank_saved[bank].inen = gpio_array[bank]->inen;
gpio_bank_saved[bank].dir = gpio_array[bank]->dir_set;
}
diff --git a/arch/blackfin/kernel/cplb-mpu/Makefile b/arch/blackfin/kernel/cplb-mpu/Makefile
index 7d70d3bf3212..394d0b1b28fe 100644
--- a/arch/blackfin/kernel/cplb-mpu/Makefile
+++ b/arch/blackfin/kernel/cplb-mpu/Makefile
@@ -2,7 +2,7 @@
# arch/blackfin/kernel/cplb-nompu/Makefile
#
-obj-y := cplbinit.o cacheinit.o cplbmgr.o
+obj-y := cplbinit.o cplbmgr.o
CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
-ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
diff --git a/arch/blackfin/kernel/cplb-mpu/cacheinit.c b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
deleted file mode 100644
index d5a86c3017f7..000000000000
--- a/arch/blackfin/kernel/cplb-mpu/cacheinit.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2004-2007 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/cpu.h>
-
-#include <asm/cacheflush.h>
-#include <asm/blackfin.h>
-#include <asm/cplb.h>
-#include <asm/cplbinit.h>
-
-#if defined(CONFIG_BFIN_ICACHE)
-void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
-{
- unsigned long ctrl;
- int i;
-
- SSYNC();
- for (i = 0; i < MAX_CPLBS; i++) {
- bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
- bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
- }
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl |= IMC | ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-}
-#endif
-
-#if defined(CONFIG_BFIN_DCACHE)
-void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
-{
- unsigned long ctrl;
- int i;
-
- SSYNC();
- for (i = 0; i < MAX_CPLBS; i++) {
- bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
- bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
- }
-
- ctrl = bfin_read_DMEM_CONTROL();
-
- /*
- * Anomaly notes:
- * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
- * register, so that the port preferences for DAG0 and DAG1 are set
- * to port B
- */
- ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-}
-#endif
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index bcdfe9b0b71f..8e1e9e9e9632 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -22,6 +22,7 @@
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
+#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>
@@ -41,46 +42,6 @@ int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
-static inline void disable_dcplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_DMEM_CONTROL();
- ctrl &= ~ENDCPLB;
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-}
-
-static inline void enable_dcplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_DMEM_CONTROL();
- ctrl |= ENDCPLB;
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-}
-
-static inline void disable_icplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl &= ~ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-}
-
-static inline void enable_icplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl |= ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-}
-
/*
* Given the contents of the status register, return the index of the
* CPLB that caused the fault.
@@ -198,10 +159,10 @@ static noinline int dcplb_miss(unsigned int cpu)
dcplb_tbl[cpu][idx].addr = addr;
dcplb_tbl[cpu][idx].data = d_data;
- disable_dcplb();
+ _disable_dcplb();
bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
- enable_dcplb();
+ _enable_dcplb();
return 0;
}
@@ -288,10 +249,10 @@ static noinline int icplb_miss(unsigned int cpu)
icplb_tbl[cpu][idx].addr = addr;
icplb_tbl[cpu][idx].data = i_data;
- disable_icplb();
+ _disable_icplb();
bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
- enable_icplb();
+ _enable_icplb();
return 0;
}
@@ -319,7 +280,7 @@ static noinline int dcplb_protection_fault(unsigned int cpu)
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
int cause = seqstat & 0x3f;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
switch (cause) {
case 0x23:
return dcplb_protection_fault(cpu);
@@ -340,19 +301,19 @@ void flush_switched_cplbs(unsigned int cpu)
nr_cplb_flush[cpu]++;
local_irq_save_hw(flags);
- disable_icplb();
+ _disable_icplb();
for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
icplb_tbl[cpu][i].data = 0;
bfin_write32(ICPLB_DATA0 + i * 4, 0);
}
- enable_icplb();
+ _enable_icplb();
- disable_dcplb();
+ _disable_dcplb();
for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
dcplb_tbl[cpu][i].data = 0;
bfin_write32(DCPLB_DATA0 + i * 4, 0);
}
- enable_dcplb();
+ _enable_dcplb();
local_irq_restore_hw(flags);
}
@@ -385,7 +346,7 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
#endif
}
- disable_dcplb();
+ _disable_dcplb();
for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
dcplb_tbl[cpu][i].addr = addr;
dcplb_tbl[cpu][i].data = d_data;
@@ -393,6 +354,6 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
bfin_write32(DCPLB_ADDR0 + i * 4, addr);
addr += PAGE_SIZE;
}
- enable_dcplb();
+ _enable_dcplb();
local_irq_restore_hw(flags);
}
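
The static inline enable/disable helpers deleted above reappear as the underscore-prefixed _enable_dcplb()/_disable_dcplb() and friends, presumably shared through the newly included <asm/cplb.h>. Judging from the removed bodies, every CPLB update follows the same pattern: sync, clear the enable bit, rewrite the entry registers, re-enable, sync. A sketch of that sequence with stand-in variables instead of the real MMR accessors:

#include <stdint.h>
#include <stdio.h>

#define ENDCPLB (1u << 1)	/* illustrative bit position, not the real one */

static uint32_t dmem_control;				/* stand-in for DMEM_CONTROL */
static uint32_t dcplb_addr[16], dcplb_data[16];		/* stand-ins for DCPLB_ADDRx/DATAx */

static void ssync(void) { /* SSYNC barrier on real hardware; no-op here */ }

static void _disable_dcplb(void)
{
	ssync();
	dmem_control &= ~ENDCPLB;	/* stop CPLB lookups before touching entries */
	ssync();
}

static void _enable_dcplb(void)
{
	ssync();
	dmem_control |= ENDCPLB;
	ssync();
}

/* Replace one data-CPLB entry while lookups are disabled. */
static void write_dcplb_entry(int idx, uint32_t addr, uint32_t data)
{
	_disable_dcplb();
	dcplb_data[idx] = data;
	dcplb_addr[idx] = addr;
	_enable_dcplb();
}

int main(void)
{
	write_dcplb_entry(4, 0x20000000u, 0x3u);
	printf("entry 4: addr=%#x data=%#x enabled=%d\n",
	       (unsigned)dcplb_addr[4], (unsigned)dcplb_data[4],
	       !!(dmem_control & ENDCPLB));
	return 0;
}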
diff --git a/arch/blackfin/kernel/cplb-nompu/Makefile b/arch/blackfin/kernel/cplb-nompu/Makefile
index 7d70d3bf3212..394d0b1b28fe 100644
--- a/arch/blackfin/kernel/cplb-nompu/Makefile
+++ b/arch/blackfin/kernel/cplb-nompu/Makefile
@@ -2,7 +2,7 @@
# arch/blackfin/kernel/cplb-nompu/Makefile
#
-obj-y := cplbinit.o cacheinit.o cplbmgr.o
+obj-y := cplbinit.o cplbmgr.o
CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
-ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
deleted file mode 100644
index d5a86c3017f7..000000000000
--- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2004-2007 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/cpu.h>
-
-#include <asm/cacheflush.h>
-#include <asm/blackfin.h>
-#include <asm/cplb.h>
-#include <asm/cplbinit.h>
-
-#if defined(CONFIG_BFIN_ICACHE)
-void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
-{
- unsigned long ctrl;
- int i;
-
- SSYNC();
- for (i = 0; i < MAX_CPLBS; i++) {
- bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
- bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
- }
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl |= IMC | ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-}
-#endif
-
-#if defined(CONFIG_BFIN_DCACHE)
-void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
-{
- unsigned long ctrl;
- int i;
-
- SSYNC();
- for (i = 0; i < MAX_CPLBS; i++) {
- bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
- bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
- }
-
- ctrl = bfin_read_DMEM_CONTROL();
-
- /*
- * Anomaly notes:
- * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
- * register, so that the port preferences for DAG0 and DAG1 are set
- * to port B
- */
- ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-}
-#endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 685f160a5a36..5d8ad503f82a 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -36,7 +36,7 @@ int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;
struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
-struct cplb_boundary icplb_bounds[7] PDT_ATTR;
+struct cplb_boundary icplb_bounds[9] PDT_ATTR;
int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;
@@ -167,14 +167,21 @@ void __init generate_cplb_tables_all(void)
icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
SDRAM_IGENERIC : SDRAM_INON_CHBL);
}
+ /* Addressing hole up to the async bank. */
+ icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
+ icplb_bounds[i_i++].data = 0;
+ /* ASYNC banks. */
+ icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
+ icplb_bounds[i_i++].data = SDRAM_EBIU;
/* Addressing hole up to BootROM. */
icplb_bounds[i_i].eaddr = BOOT_ROM_START;
icplb_bounds[i_i++].data = 0;
/* BootROM -- largest one should be less than 1 meg. */
icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
icplb_bounds[i_i++].data = SDRAM_IGENERIC;
+
if (L2_LENGTH) {
- /* Addressing hole up to L2 SRAM, including the async bank. */
+ /* Addressing hole up to L2 SRAM. */
icplb_bounds[i_i].eaddr = L2_START;
icplb_bounds[i_i++].data = 0;
/* L2 SRAM. */
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index 12b030842fdb..d9ea46c6e41a 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -48,36 +48,13 @@ int nr_cplb_flush[NR_CPUS], nr_dcplb_prot[NR_CPUS];
#define MGR_ATTR
#endif
-/*
- * We're in an exception handler. The normal cli nop nop workaround
- * isn't going to do very much, as the only thing that can interrupt
- * us is an NMI, and the cli isn't going to stop that.
- */
-#define NOWA_SSYNC __asm__ __volatile__ ("ssync;")
-
-/* Anomaly handlers provide SSYNCs, so avoid extra if anomaly is present */
-#if ANOMALY_05000125
-
-#define bfin_write_DMEM_CONTROL_SSYNC(v) bfin_write_DMEM_CONTROL(v)
-#define bfin_write_IMEM_CONTROL_SSYNC(v) bfin_write_IMEM_CONTROL(v)
-
-#else
-
-#define bfin_write_DMEM_CONTROL_SSYNC(v) \
- do { NOWA_SSYNC; bfin_write_DMEM_CONTROL(v); NOWA_SSYNC; } while (0)
-#define bfin_write_IMEM_CONTROL_SSYNC(v) \
- do { NOWA_SSYNC; bfin_write_IMEM_CONTROL(v); NOWA_SSYNC; } while (0)
-
-#endif
-
static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
unsigned long addr)
{
- unsigned long ctrl = bfin_read_DMEM_CONTROL();
- bfin_write_DMEM_CONTROL_SSYNC(ctrl & ~ENDCPLB);
+ _disable_dcplb();
bfin_write32(DCPLB_DATA0 + idx * 4, data);
bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
- bfin_write_DMEM_CONTROL_SSYNC(ctrl);
+ _enable_dcplb();
#ifdef CONFIG_CPLB_INFO
dcplb_tbl[cpu][idx].addr = addr;
@@ -88,12 +65,10 @@ static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
static inline void write_icplb_data(int cpu, int idx, unsigned long data,
unsigned long addr)
{
- unsigned long ctrl = bfin_read_IMEM_CONTROL();
-
- bfin_write_IMEM_CONTROL_SSYNC(ctrl & ~ENICPLB);
+ _disable_icplb();
bfin_write32(ICPLB_DATA0 + idx * 4, data);
bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
- bfin_write_IMEM_CONTROL_SSYNC(ctrl);
+ _enable_icplb();
#ifdef CONFIG_CPLB_INFO
icplb_tbl[cpu][idx].addr = addr;
@@ -227,7 +202,7 @@ MGR_ATTR static int dcplb_miss(int cpu)
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
int cause = seqstat & 0x3f;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
switch (cause) {
case VEC_CPLB_I_M:
return icplb_miss(cpu);
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 2ab56811841c..931c78b5ea1f 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -27,6 +27,7 @@
#include <linux/serial_core.h>
#include <linux/console.h>
#include <linux/string.h>
+#include <linux/reboot.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <asm/early_printk.h>
@@ -181,6 +182,22 @@ asmlinkage void __init init_early_exception_vectors(void)
u32 evt;
SSYNC();
+ /*
+ * This starts up the shadow buffer, in case anything crashes before
+ * setup_arch()
+ */
+ mark_shadow_error();
+ early_shadow_puts(linux_banner);
+ early_shadow_stamp();
+
+ if (CPUID != bfin_cpuid()) {
+ early_shadow_puts("Running on wrong machine type, expected");
+ early_shadow_reg(CPUID, 16);
+ early_shadow_puts(", but running on");
+ early_shadow_reg(bfin_cpuid(), 16);
+ early_shadow_puts("\n");
+ }
+
/* cannot program in software:
* evt0 - emulation (jtag)
* evt1 - reset
@@ -199,6 +216,7 @@ asmlinkage void __init init_early_exception_vectors(void)
}
+__attribute__((__noreturn__))
asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
{
/* This can happen before the uart is initialized, so initialize
@@ -210,10 +228,58 @@ asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
if (likely(early_console == NULL) && CPUID == bfin_cpuid())
setup_early_printk(DEFAULT_EARLY_PORT);
- printk(KERN_EMERG "Early panic\n");
- dump_bfin_mem(fp);
- show_regs(fp);
- dump_bfin_trace_buffer();
+ if (!shadow_console_enabled()) {
+ /* crap - we crashed before setup_arch() */
+ early_shadow_puts("panic before setup_arch\n");
+ early_shadow_puts("IPEND:");
+ early_shadow_reg(fp->ipend, 16);
+ if (fp->seqstat & SEQSTAT_EXCAUSE) {
+ early_shadow_puts("\nEXCAUSE:");
+ early_shadow_reg(fp->seqstat & SEQSTAT_EXCAUSE, 8);
+ }
+ if (fp->seqstat & SEQSTAT_HWERRCAUSE) {
+ early_shadow_puts("\nHWERRCAUSE:");
+ early_shadow_reg(
+ (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14, 8);
+ }
+ early_shadow_puts("\nErr @");
+ if (fp->ipend & EVT_EVX)
+ early_shadow_reg(fp->retx, 32);
+ else
+ early_shadow_reg(fp->pc, 32);
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ early_shadow_puts("\nTrace:");
+ if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
+ while (bfin_read_TBUFSTAT() & TBUFCNT) {
+ early_shadow_puts("\nT :");
+ early_shadow_reg(bfin_read_TBUF(), 32);
+ early_shadow_puts("\n S :");
+ early_shadow_reg(bfin_read_TBUF(), 32);
+ }
+ }
+#endif
+ early_shadow_puts("\nUse bfin-elf-addr2line to determine "
+ "function names\n");
+ /*
+ * We should panic(), but we can't - since panic calls printk,
+ * and printk uses memcpy.
+ * We want to reboot, but if the machine type is different, we
+ * can't, due to machine-specific reboot sequences.
+ */
+ if (CPUID == bfin_cpuid()) {
+ early_shadow_puts("Trying to restart\n");
+ machine_restart("");
+ }
+
+ early_shadow_puts("Halting, since it is not safe to restart\n");
+ while (1)
+ asm volatile ("EMUEXCPT; IDLE;\n");
+
+ } else {
+ printk(KERN_EMERG "Early panic\n");
+ show_regs(fp);
+ dump_bfin_trace_buffer();
+ }
panic("Died early");
}
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index a9cfba9946b5..3f8769b7db54 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -43,8 +43,28 @@
ENTRY(_ret_from_fork)
#ifdef CONFIG_IPIPE
- [--sp] = reti; /* IRQs on. */
- SP += 4;
+ /*
+ * Hw IRQs are off on entry, and we don't want the scheduling tail
+ * code to starve high priority domains from interrupts while it
+ * runs. Therefore we first stall the root stage to have the
+ * virtual interrupt state reflect IMASK.
+ */
+ p0.l = ___ipipe_root_status;
+ p0.h = ___ipipe_root_status;
+ r4 = [p0];
+ bitset(r4, 0);
+ [p0] = r4;
+ /*
+ * Then we may enable hw IRQs, allowing preemption from high
+ * priority domains. schedule_tail() will do local_irq_enable()
+ * since Blackfin does not define __ARCH_WANT_UNLOCKED_CTXSW, so
+ * there is no need to unstall the root domain by ourselves
+ * afterwards.
+ */
+ p0.l = _bfin_irq_flags;
+ p0.h = _bfin_irq_flags;
+ r4 = [p0];
+ sti r4;
#endif /* CONFIG_IPIPE */
SP += -12;
call _schedule_tail;
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
index 6980b7a0615d..76dd4fbcd17a 100644
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -17,8 +17,8 @@
* only one we can blow away. With pointer registers, we have P0-P2.
*
* Upon entry, the RETS will point to the top of the current profiled
- * function. And since GCC setup the frame for us, the previous function
- * will be waiting there. mmmm pie.
+ * function. And since GCC pushed the previous RETS for us, the previous
+ * function will be waiting there. mmmm pie.
*/
ENTRY(__mcount)
/* save third function arg early so we can do testing below */
@@ -70,14 +70,14 @@ ENTRY(__mcount)
/* setup the tracer function */
p0 = r3;
- /* tracer(ulong frompc, ulong selfpc):
- * frompc: the pc that did the call to ...
- * selfpc: ... this location
- * the selfpc itself will need adjusting for the mcount call
+ /* function_trace_call(unsigned long ip, unsigned long parent_ip):
+ * ip: this point was called by ...
+ * parent_ip: ... this function
+ * the ip itself will need adjusting for the mcount call
*/
- r1 = rets;
- r0 = [fp + 4];
- r1 += -MCOUNT_INSN_SIZE;
+ r0 = rets;
+ r1 = [sp + 16]; /* skip the 4 local regs on stack */
+ r0 += -MCOUNT_INSN_SIZE;
/* call the tracer */
call (p0);
@@ -106,9 +106,10 @@ ENTRY(_ftrace_graph_caller)
[--sp] = r1;
[--sp] = rets;
- r0 = fp;
+ /* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */
+ r0 = sp;
r1 = rets;
- r0 += 4;
+ r0 += 16; /* skip the 4 local regs on stack */
r1 += -MCOUNT_INSN_SIZE;
call _prepare_ftrace_return;
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
index 905bfc40a00b..f2c85ac6f2da 100644
--- a/arch/blackfin/kernel/ftrace.c
+++ b/arch/blackfin/kernel/ftrace.c
@@ -24,7 +24,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
- if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY)
+ if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, 0) == -EBUSY)
return;
trace.func = self_addr;
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index b8d22034b9a6..5d7382396dc0 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -30,10 +30,10 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kthread.h>
-#include <asm/unistd.h>
+#include <linux/unistd.h>
+#include <linux/io.h>
#include <asm/system.h>
#include <asm/atomic.h>
-#include <asm/io.h>
DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
@@ -90,6 +90,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
struct ipipe_domain *this_domain, *next_domain;
struct list_head *head, *pos;
+ struct ipipe_irqdesc *idesc;
int m_ack, s = -1;
/*
@@ -100,17 +101,20 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
*/
m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
this_domain = __ipipe_current_domain;
+ idesc = &this_domain->irqs[irq];
- if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+ if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
head = &this_domain->p_link;
else {
head = __ipipe_pipeline.next;
next_domain = list_entry(head, struct ipipe_domain, p_link);
- if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
- if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
- next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+ idesc = &next_domain->irqs[irq];
+ if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
+ if (!m_ack && idesc->acknowledge != NULL)
+ idesc->acknowledge(irq, irq_to_desc(irq));
if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
- s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
+ s = __test_and_set_bit(IPIPE_STALL_FLAG,
+ &p->status);
__ipipe_dispatch_wired(next_domain, irq);
goto out;
}
@@ -121,14 +125,15 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
pos = head;
while (pos != &__ipipe_pipeline) {
next_domain = list_entry(pos, struct ipipe_domain, p_link);
- if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+ idesc = &next_domain->irqs[irq];
+ if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
__ipipe_set_irq_pending(next_domain, irq);
- if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
- next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+ if (!m_ack && idesc->acknowledge != NULL) {
+ idesc->acknowledge(irq, irq_to_desc(irq));
m_ack = 1;
}
}
- if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+ if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
break;
pos = next_domain->p_link.next;
}
@@ -159,11 +164,6 @@ out:
__clear_bit(IPIPE_STALL_FLAG, &p->status);
}
-int __ipipe_check_root(void)
-{
- return ipipe_root_domain_p;
-}
-
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -186,30 +186,6 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);
-void __ipipe_stall_root_raw(void)
-{
- /*
- * This code is called by the ins{bwl} routines (see
- * arch/blackfin/lib/ins.S), which are heavily used by the
- * network stack. It masks all interrupts but those handled by
- * non-root domains, so that we keep decent network transfer
- * rates for Linux without inducing pathological jitter for
- * the real-time domain.
- */
- __asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
-
- __set_bit(IPIPE_STALL_FLAG,
- &ipipe_root_cpudom_var(status));
-}
-
-void __ipipe_unstall_root_raw(void)
-{
- __clear_bit(IPIPE_STALL_FLAG,
- &ipipe_root_cpudom_var(status));
-
- __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
-}
-
int __ipipe_syscall_root(struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p;
@@ -333,12 +309,29 @@ asmlinkage void __ipipe_sync_root(void)
void ___ipipe_sync_pipeline(unsigned long syncmask)
{
- if (__ipipe_root_domain_p) {
- if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
- return;
- }
+ if (__ipipe_root_domain_p &&
+ test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+ return;
__ipipe_sync_stage(syncmask);
}
-EXPORT_SYMBOL(show_stack);
+void __ipipe_disable_root_irqs_hw(void)
+{
+ /*
+ * This code is called by the ins{bwl} routines (see
+ * arch/blackfin/lib/ins.S), which are heavily used by the
+ * network stack. It masks all interrupts but those handled by
+ * non-root domains, so that we keep decent network transfer
+ * rates for Linux without inducing pathological jitter for
+ * the real-time domain.
+ */
+ bfin_sti(__ipipe_irq_lvmask);
+ __set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+}
+
+void __ipipe_enable_root_irqs_hw(void)
+{
+ __clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+ bfin_sti(bfin_irq_flags);
+}
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index dbcf3e45cb0b..59fc42dc5d6a 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -54,7 +54,7 @@ void kgdb_l2_test(void)
int kgdb_test(char *name, int len, int count, int z)
{
- printk(KERN_DEBUG "kgdb name(%d): %s, %d, %d\n", len, name, count, z);
+ printk(KERN_ALERT "kgdb name(%d): %s, %d, %d\n", len, name, count, z);
count = z;
return count;
}
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index d5aee3626688..67fc7a56c865 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -27,6 +27,7 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define pr_fmt(fmt) "module %s: " fmt
#include <linux/moduleloader.h>
#include <linux/elf.h>
@@ -36,6 +37,7 @@
#include <linux/kernel.h>
#include <asm/dma.h>
#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
void *module_alloc(unsigned long size)
{
@@ -52,7 +54,7 @@ void module_free(struct module *mod, void *module_region)
/* Transfer the section to the L1 memory */
int
-module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs,
+module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
/*
@@ -63,126 +65,119 @@ module_frob_arch_sections(Elf_Ehdr * hdr, Elf_Shdr * sechdrs,
* NOTE: this breaks the semantic of mod->arch structure.
*/
Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
- void *dest = NULL;
+ void *dest;
for (s = sechdrs; s < sechdrs_end; ++s) {
- if ((strcmp(".l1.text", secstrings + s->sh_name) == 0) ||
- ((strcmp(".text", secstrings + s->sh_name) == 0) &&
- (hdr->e_flags & EF_BFIN_CODE_IN_L1) && (s->sh_size > 0))) {
+ const char *shname = secstrings + s->sh_name;
+
+ if (s->sh_size == 0)
+ continue;
+
+ if (!strcmp(".l1.text", shname) ||
+ (!strcmp(".text", shname) &&
+ (hdr->e_flags & EF_BFIN_CODE_IN_L1))) {
+
dest = l1_inst_sram_alloc(s->sh_size);
mod->arch.text_l1 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L1 instruction memory allocation failed\n",
- mod->name);
+ pr_err("L1 inst memory allocation failed\n",
+ mod->name);
return -1;
}
dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
- if ((strcmp(".l1.data", secstrings + s->sh_name) == 0) ||
- ((strcmp(".data", secstrings + s->sh_name) == 0) &&
- (hdr->e_flags & EF_BFIN_DATA_IN_L1) && (s->sh_size > 0))) {
+
+ } else if (!strcmp(".l1.data", shname) ||
+ (!strcmp(".data", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L1))) {
+
dest = l1_data_sram_alloc(s->sh_size);
mod->arch.data_a_l1 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L1 data memory allocation failed\n",
+ pr_err("L1 data memory allocation failed\n",
mod->name);
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
- if (strcmp(".l1.bss", secstrings + s->sh_name) == 0 ||
- ((strcmp(".bss", secstrings + s->sh_name) == 0) &&
- (hdr->e_flags & EF_BFIN_DATA_IN_L1) && (s->sh_size > 0))) {
- dest = l1_data_sram_alloc(s->sh_size);
+
+ } else if (!strcmp(".l1.bss", shname) ||
+ (!strcmp(".bss", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L1))) {
+
+ dest = l1_data_sram_zalloc(s->sh_size);
mod->arch.bss_a_l1 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L1 data memory allocation failed\n",
+ pr_err("L1 data memory allocation failed\n",
mod->name);
return -1;
}
- memset(dest, 0, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
- if (strcmp(".l1.data.B", secstrings + s->sh_name) == 0) {
+
+ } else if (!strcmp(".l1.data.B", shname)) {
+
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.data_b_l1 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L1 data memory allocation failed\n",
+ pr_err("L1 data memory allocation failed\n",
mod->name);
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
- if (strcmp(".l1.bss.B", secstrings + s->sh_name) == 0) {
+
+ } else if (!strcmp(".l1.bss.B", shname)) {
+
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.bss_b_l1 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L1 data memory allocation failed\n",
+ pr_err("L1 data memory allocation failed\n",
mod->name);
return -1;
}
memset(dest, 0, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
- if ((strcmp(".l2.text", secstrings + s->sh_name) == 0) ||
- ((strcmp(".text", secstrings + s->sh_name) == 0) &&
- (hdr->e_flags & EF_BFIN_CODE_IN_L2) && (s->sh_size > 0))) {
+
+ } else if (!strcmp(".l2.text", shname) ||
+ (!strcmp(".text", shname) &&
+ (hdr->e_flags & EF_BFIN_CODE_IN_L2))) {
+
dest = l2_sram_alloc(s->sh_size);
mod->arch.text_l2 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L2 SRAM allocation failed\n",
- mod->name);
+ pr_err("L2 SRAM allocation failed\n",
+ mod->name);
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
- if ((strcmp(".l2.data", secstrings + s->sh_name) == 0) ||
- ((strcmp(".data", secstrings + s->sh_name) == 0) &&
- (hdr->e_flags & EF_BFIN_DATA_IN_L2) && (s->sh_size > 0))) {
+
+ } else if (!strcmp(".l2.data", shname) ||
+ (!strcmp(".data", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L2))) {
+
dest = l2_sram_alloc(s->sh_size);
mod->arch.data_l2 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L2 SRAM allocation failed\n",
+ pr_err("L2 SRAM allocation failed\n",
mod->name);
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
- if (strcmp(".l2.bss", secstrings + s->sh_name) == 0 ||
- ((strcmp(".bss", secstrings + s->sh_name) == 0) &&
- (hdr->e_flags & EF_BFIN_DATA_IN_L2) && (s->sh_size > 0))) {
- dest = l2_sram_alloc(s->sh_size);
+
+ } else if (!strcmp(".l2.bss", shname) ||
+ (!strcmp(".bss", shname) &&
+ (hdr->e_flags & EF_BFIN_DATA_IN_L2))) {
+
+ dest = l2_sram_zalloc(s->sh_size);
mod->arch.bss_l2 = dest;
if (dest == NULL) {
- printk(KERN_ERR
- "module %s: L2 SRAM allocation failed\n",
+ pr_err("L2 SRAM allocation failed\n",
mod->name);
return -1;
}
- memset(dest, 0, s->sh_size);
- s->sh_flags &= ~SHF_ALLOC;
- s->sh_addr = (unsigned long)dest;
- }
+
+ } else
+ continue;
+
+ s->sh_flags &= ~SHF_ALLOC;
+ s->sh_addr = (unsigned long)dest;
}
+
return 0;
}
@@ -190,7 +185,7 @@ int
apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec, struct module *me)
{
- printk(KERN_ERR "module %s: .rel unsupported\n", me->name);
+ pr_err(".rel unsupported\n", me->name);
return -ENOEXEC;
}
@@ -205,109 +200,86 @@ apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
/* gas does not generate it. */
/*************************************************************************/
int
-apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
+apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *mod)
{
unsigned int i;
- unsigned short tmp;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
- uint32_t *location32;
- uint16_t *location16;
- uint32_t value;
+ unsigned long location, value, size;
+
+ pr_debug("applying relocate section %u to %u\n", mod->name,
+ relsec, sechdrs[relsec].sh_info);
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
- location16 =
- (uint16_t *) (sechdrs[sechdrs[relsec].sh_info].sh_addr +
- rel[i].r_offset);
- location32 = (uint32_t *) location16;
+ location = sechdrs[sechdrs[relsec].sh_info].sh_addr +
+ rel[i].r_offset;
+
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *) sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
value = sym->st_value;
value += rel[i].r_addend;
- pr_debug("location is %x, value is %x type is %d \n",
- (unsigned int) location32, value,
- ELF32_R_TYPE(rel[i].r_info));
+
#ifdef CONFIG_SMP
- if ((unsigned long)location16 >= COREB_L1_DATA_A_START) {
- printk(KERN_ERR "module %s: cannot relocate in L1: %u (SMP kernel)",
- mod->name, ELF32_R_TYPE(rel[i].r_info));
+ if (location >= COREB_L1_DATA_A_START) {
+ pr_err("cannot relocate in L1: %u (SMP kernel)",
+ mod->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
#endif
+
+ pr_debug("location is %lx, value is %lx type is %d\n",
+ mod->name, location, value, ELF32_R_TYPE(rel[i].r_info));
+
switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_BFIN_HUIMM16:
+ value >>= 16;
+ case R_BFIN_LUIMM16:
+ case R_BFIN_RIMM16:
+ size = 2;
+ break;
+ case R_BFIN_BYTE4_DATA:
+ size = 4;
+ break;
+
case R_BFIN_PCREL24:
case R_BFIN_PCREL24_JUMP_L:
- /* Add the value, subtract its postition */
- location16 =
- (uint16_t *) (sechdrs[sechdrs[relsec].sh_info].
- sh_addr + rel[i].r_offset - 2);
- location32 = (uint32_t *) location16;
- value -= (uint32_t) location32;
- value >>= 1;
- if ((value & 0xFF000000) != 0 &&
- (value & 0xFF000000) != 0xFF000000) {
- printk(KERN_ERR "module %s: relocation overflow\n",
- mod->name);
- return -ENOEXEC;
- }
- pr_debug("value is %x, before %x-%x after %x-%x\n", value,
- *location16, *(location16 + 1),
- (*location16 & 0xff00) | (value >> 16 & 0x00ff),
- value & 0xffff);
- *location16 =
- (*location16 & 0xff00) | (value >> 16 & 0x00ff);
- *(location16 + 1) = value & 0xffff;
- break;
case R_BFIN_PCREL12_JUMP:
case R_BFIN_PCREL12_JUMP_S:
- value -= (uint32_t) location32;
- value >>= 1;
- *location16 = (value & 0xfff);
- break;
case R_BFIN_PCREL10:
- value -= (uint32_t) location32;
- value >>= 1;
- *location16 = (value & 0x3ff);
- break;
- case R_BFIN_LUIMM16:
- pr_debug("before %x after %x\n", *location16,
- (value & 0xffff));
- tmp = (value & 0xffff);
- if ((unsigned long)location16 >= L1_CODE_START) {
- dma_memcpy(location16, &tmp, 2);
- } else
- *location16 = tmp;
- break;
- case R_BFIN_HUIMM16:
- pr_debug("before %x after %x\n", *location16,
- ((value >> 16) & 0xffff));
- tmp = ((value >> 16) & 0xffff);
- if ((unsigned long)location16 >= L1_CODE_START) {
- dma_memcpy(location16, &tmp, 2);
- } else
- *location16 = tmp;
+ pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
+ mod->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+
+ default:
+ pr_err("unknown relocation: %u\n", mod->name,
+ ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+
+ switch (bfin_mem_access_type(location, size)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ memcpy((void *)location, &value, size);
break;
- case R_BFIN_RIMM16:
- *location16 = (value & 0xffff);
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy((void *)location, &value, size);
break;
- case R_BFIN_BYTE4_DATA:
- pr_debug("before %x after %x\n", *location32, value);
- *location32 = value;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy((void *)location, &value, size);
break;
default:
- printk(KERN_ERR "module %s: Unknown relocation: %u\n",
- mod->name, ELF32_R_TYPE(rel[i].r_info));
+ pr_err("invalid relocation for %#lx\n",
+ mod->name, location);
return -ENOEXEC;
}
}
+
return 0;
}
@@ -332,22 +304,28 @@ module_finalize(const Elf_Ehdr * hdr,
for (i = 1; i < hdr->e_shnum; i++) {
const char *strtab = (char *)sechdrs[strindex].sh_addr;
unsigned int info = sechdrs[i].sh_info;
+ const char *shname = secstrings + sechdrs[i].sh_name;
/* Not a valid relocation section? */
if (info >= hdr->e_shnum)
continue;
- if ((sechdrs[i].sh_type == SHT_RELA) &&
- ((strcmp(".rela.l2.text", secstrings + sechdrs[i].sh_name) == 0) ||
- (strcmp(".rela.l1.text", secstrings + sechdrs[i].sh_name) == 0) ||
- ((strcmp(".rela.text", secstrings + sechdrs[i].sh_name) == 0) &&
- (hdr->e_flags & (EF_BFIN_CODE_IN_L1|EF_BFIN_CODE_IN_L2))))) {
+ /* Only support RELA relocation types */
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ if (!strcmp(".rela.l2.text", shname) ||
+ !strcmp(".rela.l1.text", shname) ||
+ (!strcmp(".rela.text", shname) &&
+ (hdr->e_flags & (EF_BFIN_CODE_IN_L1 | EF_BFIN_CODE_IN_L2)))) {
+
err = apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
symindex, i, mod);
if (err < 0)
return -ENOEXEC;
}
}
+
return 0;
}
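
The rewritten apply_relocate_add() above splits each relocation into two steps: derive the patch width from the relocation type (2 bytes for the *IMM16 forms, with HUIMM16 shifting the value down first; 4 bytes for BYTE4_DATA), then let bfin_mem_access_type() choose memcpy, dma_memcpy, or isram_memcpy for the write. A standalone sketch of just the width/value step; the relocation names mirror the diff, everything else (type numbers, buffer, values) is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Relocation type numbers are illustrative, not the real ELF values. */
enum {
	R_BFIN_HUIMM16 = 1,
	R_BFIN_LUIMM16,
	R_BFIN_RIMM16,
	R_BFIN_BYTE4_DATA,
};

/* Compute how many bytes to patch and adjust the value, as the new code does. */
static int reloc_size(int type, uint32_t *value)
{
	switch (type) {
	case R_BFIN_HUIMM16:
		*value >>= 16;		/* take the high half... */
		/* fall through */	/* ...then treat it as a 16-bit patch */
	case R_BFIN_LUIMM16:
	case R_BFIN_RIMM16:
		return 2;
	case R_BFIN_BYTE4_DATA:
		return 4;
	default:
		return -1;		/* unsupported; the caller rejects the module */
	}
}

int main(void)
{
	uint8_t insn[4] = { 0 };
	uint32_t value = 0x12345678;
	int size = reloc_size(R_BFIN_HUIMM16, &value);

	/* On a little-endian target the adjusted value's low bytes land in place:
	 * prints "size=2 patched=34 12". */
	memcpy(insn, &value, size);
	printf("size=%d patched=%02x %02x\n", size, insn[0], insn[1]);
	return 0;
}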
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 9da36bab7ccb..f5b286189647 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -282,25 +282,19 @@ void finish_atomic_sections (struct pt_regs *regs)
{
int __user *up0 = (int __user *)regs->p0;
- if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
- return;
-
switch (regs->pc) {
case ATOMIC_XCHG32 + 2:
put_user(regs->r1, up0);
- regs->pc += 2;
+ regs->pc = ATOMIC_XCHG32 + 4;
break;
case ATOMIC_CAS32 + 2:
case ATOMIC_CAS32 + 4:
if (regs->r0 == regs->r1)
+ case ATOMIC_CAS32 + 6:
put_user(regs->r2, up0);
regs->pc = ATOMIC_CAS32 + 8;
break;
- case ATOMIC_CAS32 + 6:
- put_user(regs->r2, up0);
- regs->pc += 2;
- break;
case ATOMIC_ADD32 + 2:
regs->r0 = regs->r1 + regs->r0;
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 6a387eec6b65..30f4828277ad 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -206,6 +206,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
+ void *paddr = (void *)addr;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -215,42 +216,49 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKTEXT: /* read word at location addr. */
{
unsigned long tmp = 0;
- int copied;
+ int copied = 0, to_copy = sizeof(tmp);
ret = -EIO;
- pr_debug("ptrace: PEEKTEXT at addr 0x%08lx + %ld\n", addr, sizeof(data));
- if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
+ pr_debug("ptrace: PEEKTEXT at addr 0x%08lx + %i\n", addr, to_copy);
+ if (is_user_addr_valid(child, addr, to_copy) < 0)
break;
pr_debug("ptrace: user address is valid\n");
- if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
- && addr + sizeof(tmp) <= get_l1_code_start() + L1_CODE_LENGTH) {
- safe_dma_memcpy (&tmp, (const void *)(addr), sizeof(tmp));
- copied = sizeof(tmp);
-
- } else if (L1_DATA_A_LENGTH != 0 && addr >= L1_DATA_A_START
- && addr + sizeof(tmp) <= L1_DATA_A_START + L1_DATA_A_LENGTH) {
- memcpy(&tmp, (const void *)(addr), sizeof(tmp));
- copied = sizeof(tmp);
-
- } else if (L1_DATA_B_LENGTH != 0 && addr >= L1_DATA_B_START
- && addr + sizeof(tmp) <= L1_DATA_B_START + L1_DATA_B_LENGTH) {
- memcpy(&tmp, (const void *)(addr), sizeof(tmp));
- copied = sizeof(tmp);
-
- } else if (addr >= FIXED_CODE_START
- && addr + sizeof(tmp) <= FIXED_CODE_END) {
- copy_from_user_page(0, 0, 0, &tmp, (const void *)(addr), sizeof(tmp));
- copied = sizeof(tmp);
-
- } else
+ switch (bfin_mem_access_type(addr, to_copy)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &tmp,
- sizeof(tmp), 0);
+ to_copy, 0);
+ if (copied)
+ break;
+
+ /* hrm, why didn't that work ... maybe no mapping */
+ if (addr >= FIXED_CODE_START &&
+ addr + to_copy <= FIXED_CODE_END) {
+ copy_from_user_page(0, 0, 0, &tmp, paddr, to_copy);
+ copied = to_copy;
+ } else if (addr >= BOOT_ROM_START) {
+ memcpy(&tmp, paddr, to_copy);
+ copied = to_copy;
+ }
- pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp);
- if (copied != sizeof(tmp))
break;
- ret = put_user(tmp, datap);
+ case BFIN_MEM_ACCESS_DMA:
+ if (safe_dma_memcpy(&tmp, paddr, to_copy))
+ copied = to_copy;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(&tmp, paddr, to_copy))
+ copied = to_copy;
+ break;
+ default:
+ copied = 0;
+ break;
+ }
+
+ pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp);
+ if (copied == to_copy)
+ ret = put_user(tmp, datap);
break;
}
@@ -277,9 +285,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = child->mm->start_data;
#ifdef CONFIG_BINFMT_ELF_FDPIC
} else if (addr == (sizeof(struct pt_regs) + 12)) {
- tmp = child->mm->context.exec_fdpic_loadmap;
+ goto case_PTRACE_GETFDPIC_EXEC;
} else if (addr == (sizeof(struct pt_regs) + 16)) {
- tmp = child->mm->context.interp_fdpic_loadmap;
+ goto case_PTRACE_GETFDPIC_INTERP;
#endif
} else {
tmp = get_reg(child, addr);
@@ -288,49 +296,78 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ case PTRACE_GETFDPIC: {
+ unsigned long tmp = 0;
+
+ switch (addr) {
+ case_PTRACE_GETFDPIC_EXEC:
+ case PTRACE_GETFDPIC_EXEC:
+ tmp = child->mm->context.exec_fdpic_loadmap;
+ break;
+ case_PTRACE_GETFDPIC_INTERP:
+ case PTRACE_GETFDPIC_INTERP:
+ tmp = child->mm->context.interp_fdpic_loadmap;
+ break;
+ default:
+ break;
+ }
+
+ ret = put_user(tmp, datap);
+ break;
+ }
+#endif
+
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKEDATA:
pr_debug("ptrace: PTRACE_PEEKDATA\n");
/* fall through */
case PTRACE_POKETEXT: /* write the word at location addr. */
{
- int copied;
+ int copied = 0, to_copy = sizeof(data);
ret = -EIO;
- pr_debug("ptrace: POKETEXT at addr 0x%08lx + %ld bytes %lx\n",
- addr, sizeof(data), data);
- if (is_user_addr_valid(child, addr, sizeof(data)) < 0)
+ pr_debug("ptrace: POKETEXT at addr 0x%08lx + %i bytes %lx\n",
+ addr, to_copy, data);
+ if (is_user_addr_valid(child, addr, to_copy) < 0)
break;
pr_debug("ptrace: user address is valid\n");
- if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
- && addr + sizeof(data) <= get_l1_code_start() + L1_CODE_LENGTH) {
- safe_dma_memcpy ((void *)(addr), &data, sizeof(data));
- copied = sizeof(data);
-
- } else if (L1_DATA_A_LENGTH != 0 && addr >= L1_DATA_A_START
- && addr + sizeof(data) <= L1_DATA_A_START + L1_DATA_A_LENGTH) {
- memcpy((void *)(addr), &data, sizeof(data));
- copied = sizeof(data);
-
- } else if (L1_DATA_B_LENGTH != 0 && addr >= L1_DATA_B_START
- && addr + sizeof(data) <= L1_DATA_B_START + L1_DATA_B_LENGTH) {
- memcpy((void *)(addr), &data, sizeof(data));
- copied = sizeof(data);
-
- } else if (addr >= FIXED_CODE_START
- && addr + sizeof(data) <= FIXED_CODE_END) {
- copy_to_user_page(0, 0, 0, (void *)(addr), &data, sizeof(data));
- copied = sizeof(data);
-
- } else
+ switch (bfin_mem_access_type(addr, to_copy)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &data,
- sizeof(data), 1);
+ to_copy, 1);
+ if (copied)
+ break;
+
+ /* hrm, why didn't that work ... maybe no mapping */
+ if (addr >= FIXED_CODE_START &&
+ addr + to_copy <= FIXED_CODE_END) {
+ copy_to_user_page(0, 0, 0, paddr, &data, to_copy);
+ copied = to_copy;
+ } else if (addr >= BOOT_ROM_START) {
+ memcpy(paddr, &data, to_copy);
+ copied = to_copy;
+ }
- pr_debug("ptrace: copied size %d\n", copied);
- if (copied != sizeof(data))
break;
- ret = 0;
+ case BFIN_MEM_ACCESS_DMA:
+ if (safe_dma_memcpy(paddr, &data, to_copy))
+ copied = to_copy;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(paddr, &data, to_copy))
+ copied = to_copy;
+ break;
+ default:
+ copied = 0;
+ break;
+ }
+
+ pr_debug("ptrace: copied size %d\n", copied);
+ if (copied == to_copy)
+ ret = 0;
break;
}
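
Both the PEEKTEXT and POKETEXT paths above now route through bfin_mem_access_type(), which is assumed to classify an address range by which copier can reach it, replacing the old chain of per-region address comparisons. A rough standalone sketch of that shape for the peek side; the classifier's window and thresholds here are made up, and memcpy stands in for access_process_vm()/safe_dma_memcpy():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum bfin_mem_access {
	ACCESS_CORE,	/* a plain copy (or access_process_vm()) can reach it */
	ACCESS_DMA,	/* needs a DMA engine copy, e.g. L1 instruction SRAM */
};

/* Made-up classifier: one "L1 code" window that would need DMA, core otherwise. */
static enum bfin_mem_access classify(uintptr_t addr, size_t len)
{
	if (addr >= 0xFFA00000u && addr + len <= 0xFFA04000u)
		return ACCESS_DMA;
	return ACCESS_CORE;
}

/* Peek one word, returning how many bytes were actually copied. */
static size_t peek_word(uintptr_t addr, uint32_t *out)
{
	switch (classify(addr, sizeof(*out))) {
	case ACCESS_CORE:
		memcpy(out, (const void *)addr, sizeof(*out));
		return sizeof(*out);
	case ACCESS_DMA:
		/* would be safe_dma_memcpy() in the kernel; not reachable here */
		return 0;
	}
	return 0;
}

int main(void)
{
	static uint32_t word = 0xdeadbeef;
	uint32_t tmp = 0;
	size_t copied = peek_word((uintptr_t)&word, &tmp);

	printf("copied=%zu value=%#x\n", copied, (unsigned)tmp);
	return 0;
}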
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 6225edae488e..369535b61ed1 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -112,7 +112,7 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
/*
* In cache coherence emulation mode, we need to have the
* D-cache enabled before running any atomic operation which
- * might invove cache invalidation (i.e. spinlock, rwlock).
+ * might involve cache invalidation (i.e. spinlock, rwlock).
* So printk's are deferred until then.
*/
#ifdef CONFIG_BFIN_ICACHE
@@ -187,6 +187,8 @@ void __init bfin_relocate_l1_mem(void)
unsigned long l1_data_b_length;
unsigned long l2_length;
+ early_shadow_stamp();
+
/*
* due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
* we know that everything about l1 text/data is nice and aligned,
@@ -511,6 +513,7 @@ static __init void memory_setup(void)
#ifdef CONFIG_MTD_UCLINUX
unsigned long mtd_phys = 0;
#endif
+ unsigned long max_mem;
_rambase = (unsigned long)_stext;
_ramstart = (unsigned long)_end;
@@ -520,7 +523,22 @@ static __init void memory_setup(void)
panic("DMA region exceeds memory limit: %lu.",
_ramend - _ramstart);
}
- memory_end = _ramend - DMA_UNCACHED_REGION;
+ max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;
+
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
+ /* Due to a Hardware Anomaly we need to limit the size of usable
+ * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
+ * 05000263 - Hardware loop corrupted when taking an ICPLB exception
+ */
+# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
+ if (max_mem >= 56 * 1024 * 1024)
+ max_mem = 56 * 1024 * 1024;
+# else
+ if (max_mem >= 60 * 1024 * 1024)
+ max_mem = 60 * 1024 * 1024;
+# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
+#endif /* ANOMALY_05000263 */
+
#ifdef CONFIG_MPU
/* Round up to multiple of 4MB */
@@ -549,22 +567,16 @@ static __init void memory_setup(void)
# if defined(CONFIG_ROMFS_FS)
if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
- && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
+ && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
mtd_size =
PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
-# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
- /* Due to a Hardware Anomaly we need to limit the size of usable
- * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
- * 05000263 - Hardware loop corrupted when taking an ICPLB exception
- */
-# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
- if (memory_end >= 56 * 1024 * 1024)
- memory_end = 56 * 1024 * 1024;
-# else
- if (memory_end >= 60 * 1024 * 1024)
- memory_end = 60 * 1024 * 1024;
-# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
-# endif /* ANOMALY_05000263 */
+
+ /* ROM_FS is XIP, so if we found it, we need to limit memory */
+ if (memory_end > max_mem) {
+ pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
+ memory_end = max_mem;
+ }
+ }
# endif /* CONFIG_ROMFS_FS */
/* Since the default MTD_UCLINUX has no magic number, we just blindly
@@ -586,20 +598,14 @@ static __init void memory_setup(void)
}
#endif /* CONFIG_MTD_UCLINUX */
-#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
- /* Due to a Hardware Anomaly we need to limit the size of usable
- * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
- * 05000263 - Hardware loop corrupted when taking an ICPLB exception
+ /* We need to limit memory, since everything could have a text section
+ * of userspace in it, and expose anomaly 05000263. If the anomaly
+ * doesn't exist, or we don't need to - then don't.
*/
-#if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
- if (memory_end >= 56 * 1024 * 1024)
- memory_end = 56 * 1024 * 1024;
-#else
- if (memory_end >= 60 * 1024 * 1024)
- memory_end = 60 * 1024 * 1024;
-#endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
- printk(KERN_NOTICE "Warning: limiting memory to %liMB due to hardware anomaly 05000263\n", memory_end >> 20);
-#endif /* ANOMALY_05000263 */
+ if (memory_end > max_mem) {
+ pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
+ memory_end = max_mem;
+ }
#ifdef CONFIG_MPU
page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
@@ -693,7 +699,7 @@ static __init void setup_bootmem_allocator(void)
sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
print_memory_map("boot memmap");
- /* intialize globals in linux/bootmem.h */
+ /* initialize globals in linux/bootmem.h */
find_min_max_pfn();
/* pfn of the last usable page frame */
if (max_pfn > memory_end >> PAGE_SHIFT)
@@ -806,6 +812,8 @@ void __init setup_arch(char **cmdline_p)
{
unsigned long sclk, cclk;
+ enable_shadow_console();
+
/* Check to make sure we are running on the right processor */
if (unlikely(CPUID != bfin_cpuid()))
printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
@@ -1230,57 +1238,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef __ARCH_SYNC_CORE_ICACHE
seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
#endif
-#ifdef CONFIG_BFIN_ICACHE_LOCK
- switch ((cpudata->imemctl >> 3) & WAYALL_L) {
- case WAY0_L:
- seq_printf(m, "Way0 Locked-Down\n");
- break;
- case WAY1_L:
- seq_printf(m, "Way1 Locked-Down\n");
- break;
- case WAY01_L:
- seq_printf(m, "Way0,Way1 Locked-Down\n");
- break;
- case WAY2_L:
- seq_printf(m, "Way2 Locked-Down\n");
- break;
- case WAY02_L:
- seq_printf(m, "Way0,Way2 Locked-Down\n");
- break;
- case WAY12_L:
- seq_printf(m, "Way1,Way2 Locked-Down\n");
- break;
- case WAY012_L:
- seq_printf(m, "Way0,Way1 & Way2 Locked-Down\n");
- break;
- case WAY3_L:
- seq_printf(m, "Way3 Locked-Down\n");
- break;
- case WAY03_L:
- seq_printf(m, "Way0,Way3 Locked-Down\n");
- break;
- case WAY13_L:
- seq_printf(m, "Way1,Way3 Locked-Down\n");
- break;
- case WAY013_L:
- seq_printf(m, "Way 0,Way1,Way3 Locked-Down\n");
- break;
- case WAY32_L:
- seq_printf(m, "Way3,Way2 Locked-Down\n");
- break;
- case WAY320_L:
- seq_printf(m, "Way3,Way2,Way0 Locked-Down\n");
- break;
- case WAY321_L:
- seq_printf(m, "Way3,Way2,Way1 Locked-Down\n");
- break;
- case WAYALL_L:
- seq_printf(m, "All Ways are locked\n");
- break;
- default:
- seq_printf(m, "No Ways are locked\n");
- }
-#endif
if (cpu_num != num_possible_cpus() - 1)
return 0;
@@ -1346,6 +1303,7 @@ const struct seq_operations cpuinfo_op = {
void __init cmdline_init(const char *r0)
{
+ early_shadow_stamp();
if (r0)
strncpy(command_line, r0, COMMAND_LINE_SIZE);
}
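
The setup.c rework above computes the anomaly-05000263 ceiling once into max_mem (56 MiB with DEBUG_HUNT_FOR_ZERO, 60 MiB otherwise) and clamps memory_end against it in both the ROMFS/XIP branch and the general path, instead of repeating the #ifdef block. The clamp itself reduces to a few lines; a sketch with illustrative inputs:

#include <stdio.h>

#define MiB (1024UL * 1024UL)

/* Returns the usable end of memory after applying the anomaly ceiling. */
static unsigned long clamp_memory_end(unsigned long memory_end,
				      int anomaly_05000263,
				      int hunt_for_zero)
{
	unsigned long max_mem = memory_end;

	if (anomaly_05000263)
		max_mem = (hunt_for_zero ? 56 : 60) * MiB;

	if (memory_end > max_mem) {
		printf("Limiting kernel memory to %luMB due to anomaly 05000263\n",
		       max_mem >> 20);
		memory_end = max_mem;
	}
	return memory_end;
}

int main(void)
{
	printf("-> %luMB\n", clamp_memory_end(64 * MiB, 1, 0) >> 20);	/* clamped to 60MB */
	printf("-> %luMB\n", clamp_memory_end(32 * MiB, 1, 1) >> 20);	/* 32MB, no clamp */
	return 0;
}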
diff --git a/arch/blackfin/kernel/shadow_console.c b/arch/blackfin/kernel/shadow_console.c
new file mode 100644
index 000000000000..8b8c7107a162
--- /dev/null
+++ b/arch/blackfin/kernel/shadow_console.c
@@ -0,0 +1,113 @@
+/*
+ * manage a small early shadow of the log buffer which we can pass to the
+ * bootloader, so early crash messages are communicated properly and easily
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <asm/blackfin.h>
+#include <asm/irq_handler.h>
+#include <asm/early_printk.h>
+
+#define SHADOW_CONSOLE_START (0x500)
+#define SHADOW_CONSOLE_END (0x1000)
+#define SHADOW_CONSOLE_MAGIC_LOC (0x4F0)
+#define SHADOW_CONSOLE_MAGIC (0xDEADBEEF)
+
+static __initdata char *shadow_console_buffer = (char *)SHADOW_CONSOLE_START;
+
+__init void early_shadow_write(struct console *con, const char *s,
+ unsigned int n)
+{
+ unsigned int i;
+ /*
+ * save 2 bytes for the double null at the end
+ * once we fail on a long line, make sure we don't write a short line afterwards
+ */
+ if ((shadow_console_buffer + n) <= (char *)(SHADOW_CONSOLE_END - 2)) {
+ /* can't use memcpy - it may not be relocated yet */
+ for (i = 0; i <= n; i++)
+ shadow_console_buffer[i] = s[i];
+ shadow_console_buffer += n;
+ shadow_console_buffer[0] = 0;
+ shadow_console_buffer[1] = 0;
+ } else
+ shadow_console_buffer = (char *)SHADOW_CONSOLE_END;
+}
+
+static __initdata struct console early_shadow_console = {
+ .name = "early_shadow",
+ .write = early_shadow_write,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
+ .index = -1,
+ .device = 0,
+};
+
+__init int shadow_console_enabled(void)
+{
+ return early_shadow_console.flags & CON_ENABLED;
+}
+
+__init void mark_shadow_error(void)
+{
+ int *loc = (int *)SHADOW_CONSOLE_MAGIC_LOC;
+ loc[0] = SHADOW_CONSOLE_MAGIC;
+ loc[1] = SHADOW_CONSOLE_START;
+}
+
+__init void enable_shadow_console(void)
+{
+ if (!shadow_console_enabled()) {
+ register_console(&early_shadow_console);
+ /* for now, assume things are going to fail */
+ mark_shadow_error();
+ }
+}
+
+static __init int disable_shadow_console(void)
+{
+ /*
+ * by the time pure_initcall runs, the standard console is enabled,
+ * and the early_console is off, so unset the magic numbers
+ * unregistering the console is taken care of in common code (See
+ * ./kernel/printk:disable_boot_consoles() )
+ */
+ int *loc = (int *)SHADOW_CONSOLE_MAGIC_LOC;
+
+ loc[0] = 0;
+
+ return 0;
+}
+pure_initcall(disable_shadow_console);
+
+/*
+ * since we can't use printk, dump numbers (as hex), n = # bits
+ */
+__init void early_shadow_reg(unsigned long reg, unsigned int n)
+{
+ /*
+	 * can't use any "normal" kernel features, since they
+ * may not be relocated to their execute address yet
+ */
+ int i;
+ char ascii[11] = " 0x";
+
+ n = n / 4;
+ reg = reg << ((8 - n) * 4);
+ n += 3;
+
+ for (i = 3; i <= n ; i++) {
+ ascii[i] = hex_asc_lo(reg >> 28);
+ reg <<= 4;
+ }
+ early_shadow_write(NULL, ascii, n);
+
+}
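
The hex formatter above cannot rely on printk or even snprintf, since nothing may be relocated yet, so it builds the string by hand. The following stand-alone user-space sketch (not the kernel routine itself) mirrors the same idea: left-align the requested number of nibbles and peel them off four bits at a time behind a " 0x" prefix.

#include <stdio.h>

/* illustrative re-implementation of the formatting idea used by early_shadow_reg() */
static void shadow_fmt(unsigned long reg, unsigned int bits, char *out)
{
	static const char hex[] = "0123456789abcdef";
	unsigned int digits = bits / 4;
	unsigned int i;

	out[0] = ' ';
	out[1] = '0';
	out[2] = 'x';
	reg <<= (8 - digits) * 4;	/* align the most significant wanted nibble at bits 31:28 */
	for (i = 0; i < digits; i++) {
		out[3 + i] = hex[(reg >> 28) & 0xf];
		reg <<= 4;
	}
	out[3 + digits] = '\0';
}

int main(void)
{
	char buf[16];

	shadow_fmt(0xdeadbeef, 32, buf);
	printf("%s\n", buf);	/* prints " 0xdeadbeef" */
	shadow_fmt(0x1234, 16, buf);
	printf("%s\n", buf);	/* prints " 0x1234" */
	return 0;
}
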
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 0791eba40d9f..f9715764383e 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -66,7 +66,7 @@ static cycle_t bfin_read_cycles(struct clocksource *cs)
static struct clocksource bfin_cs_cycles = {
.name = "bfin_cs_cycles",
- .rating = 350,
+ .rating = 400,
.read = bfin_read_cycles,
.mask = CLOCKSOURCE_MASK(64),
.shift = 22,
@@ -115,7 +115,7 @@ static cycle_t bfin_read_gptimer0(void)
static struct clocksource bfin_cs_gptimer0 = {
.name = "bfin_cs_gptimer0",
- .rating = 400,
+ .rating = 350,
.read = bfin_read_gptimer0,
.mask = CLOCKSOURCE_MASK(32),
.shift = 22,
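
The two hunks above only swap the .rating values so that the core cycle counter (now 400) outranks gptimer0 (now 350); the timekeeping core always selects the highest-rated registered clocksource. A minimal sketch of how a rating enters a clocksource definition, modeled on the structures shown above (example_read is a placeholder, not a Blackfin driver):

#include <linux/clocksource.h>

static cycle_t example_read(struct clocksource *cs)
{
	/* a real driver would read its hardware counter here */
	return 0;
}

static struct clocksource example_cs = {
	.name	= "example_cs",
	.rating	= 400,	/* higher rating = preferred when several sources are registered */
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 22,
};
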
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index bf2b2d1f8ae5..56464cb8edf3 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -100,7 +100,11 @@ static void decode_address(char *buf, unsigned long address)
char *modname;
char *delim = ":";
char namebuf[128];
+#endif
+
+ buf += sprintf(buf, "<0x%08lx> ", address);
+#ifdef CONFIG_KALLSYMS
/* look up the address and see if we are in kernel space */
symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
@@ -108,23 +112,33 @@ static void decode_address(char *buf, unsigned long address)
/* yeah! kernel space! */
if (!modname)
modname = delim = "";
- sprintf(buf, "<0x%p> { %s%s%s%s + 0x%lx }",
- (void *)address, delim, modname, delim, symname,
- (unsigned long)offset);
+ sprintf(buf, "{ %s%s%s%s + 0x%lx }",
+ delim, modname, delim, symname,
+ (unsigned long)offset);
return;
-
}
#endif
- /* Problem in fixed code section? */
if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
- sprintf(buf, "<0x%p> /* Maybe fixed code section */", (void *)address);
+ /* Problem in fixed code section? */
+ strcat(buf, "/* Maybe fixed code section */");
+ return;
+
+ } else if (address < CONFIG_BOOT_LOAD) {
+ /* Problem somewhere before the kernel start address */
+ strcat(buf, "/* Maybe null pointer? */");
+ return;
+
+ } else if (address >= COREMMR_BASE) {
+ strcat(buf, "/* core mmrs */");
return;
- }
- /* Problem somewhere before the kernel start address */
- if (address < CONFIG_BOOT_LOAD) {
- sprintf(buf, "<0x%p> /* Maybe null pointer? */", (void *)address);
+ } else if (address >= SYSMMR_BASE) {
+ strcat(buf, "/* system mmrs */");
+ return;
+
+ } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
+ strcat(buf, "/* on-chip L1 ROM */");
return;
}
@@ -172,18 +186,16 @@ static void decode_address(char *buf, unsigned long address)
offset = (address - vma->vm_start) +
(vma->vm_pgoff << PAGE_SHIFT);
- sprintf(buf, "<0x%p> [ %s + 0x%lx ]",
- (void *)address, name, offset);
+ sprintf(buf, "[ %s + 0x%lx ]", name, offset);
} else
- sprintf(buf, "<0x%p> [ %s vma:0x%lx-0x%lx]",
- (void *)address, name,
- vma->vm_start, vma->vm_end);
+ sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
+ name, vma->vm_start, vma->vm_end);
if (!in_atomic)
mmput(mm);
- if (!strlen(buf))
- sprintf(buf, "<0x%p> [ %s ] dynamic memory", (void *)address, name);
+ if (buf[0] == '\0')
+ sprintf(buf, "[ %s ] dynamic memory", name);
goto done;
}
@@ -193,7 +205,7 @@ static void decode_address(char *buf, unsigned long address)
}
/* we were unable to find this address anywhere */
- sprintf(buf, "<0x%p> /* kernel dynamic memory */", (void *)address);
+ sprintf(buf, "/* kernel dynamic memory */");
done:
write_unlock_irqrestore(&tasklist_lock, flags);
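
After this rework decode_address() always prints the raw "<0x%08lx> " prefix first and then appends a classification, e.g. "<0x00000000> /* Maybe null pointer? */". A stand-alone sketch of the new classification order follows; the memory-map constants below are illustrative placeholders, not the real Blackfin values.

#include <stdio.h>
#include <string.h>

#define FIXED_CODE_START	0x00000400ul	/* placeholder */
#define FIXED_CODE_END		0x00000490ul	/* placeholder */
#define BOOT_LOAD		0x00001000ul	/* placeholder for CONFIG_BOOT_LOAD */
#define SYSMMR_BASE		0xffc00000ul	/* placeholder */
#define COREMMR_BASE		0xffe00000ul	/* placeholder */

static void classify(char *buf, unsigned long addr)
{
	buf += sprintf(buf, "<0x%08lx> ", addr);	/* raw address always comes first */

	if (addr >= FIXED_CODE_START && addr < FIXED_CODE_END)
		strcat(buf, "/* Maybe fixed code section */");
	else if (addr < BOOT_LOAD)
		strcat(buf, "/* Maybe null pointer? */");
	else if (addr >= COREMMR_BASE)
		strcat(buf, "/* core mmrs */");
	else if (addr >= SYSMMR_BASE)
		strcat(buf, "/* system mmrs */");
	/* the L1 ROM, symbol and VMA lookups from the hunks above are omitted here */
}

int main(void)
{
	char buf[64];

	classify(buf, 0x0);
	puts(buf);		/* raw address plus the null-pointer hint */
	classify(buf, 0xffc00014);
	puts(buf);		/* raw address plus the system-mmr hint */
	return 0;
}
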
@@ -215,14 +227,14 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
printk(KERN_EMERG "Double Fault\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
char buf[150];
- decode_address(buf, cpu_pda[cpu].retx);
+ decode_address(buf, cpu_pda[cpu].retx_doublefault);
printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
- (unsigned int)cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE, buf);
- decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
+ (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
+ decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
- decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
+ decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
decode_address(buf, fp->retx);
@@ -245,13 +257,13 @@ static int kernel_mode_regs(struct pt_regs *regs)
return regs->ipend & 0xffc0;
}
-asmlinkage void trap_c(struct pt_regs *fp)
+asmlinkage notrace void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
int j;
#endif
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
#endif
const char *strerror = NULL;
int sig = 0;
@@ -267,11 +279,6 @@ asmlinkage void trap_c(struct pt_regs *fp)
* double faults if the stack has become corrupt
*/
-#ifndef CONFIG_KGDB
- /* IPEND is skipped if KGDB isn't enabled (see entry code) */
- fp->ipend = bfin_read_IPEND();
-#endif
-
/* trap_c() will be called for exceptions. During exceptions
* processing, the pc value should be set with retx value.
* With this change we can cleanup some code in signal.c- TODO
@@ -404,7 +411,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
/* 0x23 - Data CPLB protection violation, handled here */
case VEC_CPLB_VL:
info.si_code = ILL_CPLB_VI;
- sig = SIGBUS;
+ sig = SIGSEGV;
strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
@@ -904,7 +911,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
frame_no = 0;
for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
- addr <= endstack; addr++, i++) {
+ addr < endstack; addr++, i++) {
ret_addr = 0;
if (!j && i % 8 == 0)
@@ -949,6 +956,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}
#endif
}
+EXPORT_SYMBOL(show_stack);
void dump_stack(void)
{
@@ -1090,7 +1098,7 @@ void show_regs(struct pt_regs *fp)
struct irqaction *action;
unsigned int i;
unsigned long flags = 0;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = raw_smp_processor_id();
unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
verbose_printk(KERN_NOTICE "\n");
@@ -1116,10 +1124,16 @@ void show_regs(struct pt_regs *fp)
verbose_printk(KERN_NOTICE "%s", linux_banner);
- verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n",
- print_tainted());
- verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n",
- (long)fp->seqstat, fp->ipend, fp->syscfg);
+ verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
+ verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
+ (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
+ if (fp->ipend & EVT_IRPTEN)
+ verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
+ if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
+ EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
+ verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
+ if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
+ verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");
if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
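
The extra show_regs() lines above are derived entirely from IPEND and the saved ex_imask: IPEND bit 4 flags the global interrupt disable, and clear IVG/IVTMR bits in the mask mean those interrupt levels cannot fire. A stand-alone sketch of that decoding (the numeric bit positions below follow the usual Blackfin event numbering but are spelled out here only for illustration):

#include <stdio.h>

#define EVT_IRPTEN	(1u << 4)	/* global interrupt disable, IPEND[4] */
#define EVT_IVTMR	(1u << 6)	/* core timer */
#define EVT_IVG_PERIPH	0x3f80u		/* IVG7..IVG13 */
#define EVT_IVG_KERNEL	0xc000u		/* IVG14, IVG15 */

static void explain_masks(unsigned int ipend, unsigned int imask)
{
	if (ipend & EVT_IRPTEN)
		puts(" Global Interrupts Disabled (IPEND[4])");
	if (!(imask & (EVT_IVG_PERIPH | EVT_IVTMR)))
		puts(" Peripheral interrupts masked off");
	if (!(imask & EVT_IVG_KERNEL))
		puts(" Kernel interrupts masked off");
}

int main(void)
{
	/* example: IPEND with bit 4 set, IMASK with only IVG15 left enabled */
	explain_masks(0x0010, 0x8000);
	return 0;
}
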
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 6ac307ca0d80..21ac7c26079e 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -221,7 +221,7 @@ SECTIONS
. = ALIGN(4);
__ebss_l1 = .;
}
- ASSERT (SIZEOF(.data_a_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")
+ ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")
.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
{
@@ -262,7 +262,7 @@ SECTIONS
. = ALIGN(4);
__ebss_l2 = .;
}
- ASSERT (SIZEOF(.text_data_l1) <= L2_LENGTH, "L2 overflow!")
+ ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")
/* Force trailing alignment of our init section so that when we
* free our init memory, we don't leave behind a partial page.
@@ -277,8 +277,5 @@ SECTIONS
DWARF_DEBUG
- /DISCARD/ :
- {
- *(.exitcall.exit)
- }
+ DISCARDS
}