Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/entry-common.S     | 20
-rw-r--r--  arch/arm/kernel/head-nommu.S       |  2
-rw-r--r--  arch/arm/kernel/irq.c              |  2
-rw-r--r--  arch/arm/kernel/kprobes-test-arm.c |  4
-rw-r--r--  arch/arm/kernel/machine_kexec.c    |  7
-rw-r--r--  arch/arm/kernel/perf_event.c       |  4
-rw-r--r--  arch/arm/kernel/process.c          |  4
-rw-r--r--  arch/arm/kernel/ptrace.c           | 43
-rw-r--r--  arch/arm/kernel/sched_clock.c      | 18
-rw-r--r--  arch/arm/kernel/smp.c              | 21
-rw-r--r--  arch/arm/kernel/smp_twd.c          | 58
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S      | 19
12 files changed, 122 insertions(+), 80 deletions(-)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 417bac1846bd..804153c0a9cf 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -88,9 +88,9 @@ ENTRY(ret_from_fork)
bl schedule_tail
cmp r5, #0
movne r0, r4
- movne lr, pc
+ adrne lr, BSYM(1f)
movne pc, r5
- get_thread_info tsk
+1: get_thread_info tsk
b ret_slow_syscall
ENDPROC(ret_from_fork)
@@ -417,16 +417,6 @@ local_restart:
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
stmdb sp!, {r4, r5} @ push fifth and sixth args
-#ifdef CONFIG_SECCOMP
- tst r10, #_TIF_SECCOMP
- beq 1f
- mov r0, scno
- bl __secure_computing
- add r0, sp, #S_R0 + S_OFF @ pointer to regs
- ldmia r0, {r0 - r3} @ have to reload r0 - r3
-1:
-#endif
-
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
@@ -458,11 +448,13 @@ __sys_trace:
ldmccia r1, {r0 - r6} @ have to reload r0 - r6
stmccia sp, {r4, r5} @ and update the stack args
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
- b 2b
+ cmp scno, #-1 @ skip the syscall?
+ bne 2b
+ add sp, sp, #S_OFF @ restore stack
+ b ret_slow_syscall
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
- mov r1, scno
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
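Two separate fixes land in entry-common.S: ret_from_fork now builds the return address with adr/BSYM, which keeps lr correct whether the kernel is built for ARM or Thumb-2, and the seccomp check moves out of assembly into syscall_trace_enter(), whose -1 return now makes __sys_trace skip the syscall entirely. A standalone C model of that skip decision (illustrative names, not kernel code):

#include <stdio.h>

#define NR_SYSCALLS 380

static long dispatch(long scno)
{
    if (scno == -1) {               /* tracer/seccomp asked us to skip */
        printf("scno=-1: skipped, straight to the exit path\n");
        return -1;
    }
    if (scno >= 0 && scno < NR_SYSCALLS) {
        printf("sys_%ld() dispatched\n", scno);
        return 0;
    }
    return -38;                     /* ENOSYS for out-of-range numbers */
}

int main(void)
{
    dispatch(4);    /* normal syscall */
    dispatch(-1);   /* skipped: tracer rewrote the syscall number */
    return 0;
}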
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 278cfc144f44..2c228a07e58c 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ __after_proc_init:
* CP15 system control register value returned in r0 from
* the CPU init function.
*/
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
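The nommu head code used to set the alignment-trap bit whenever CONFIG_ALIGNMENT_TRAP was enabled; it is now also gated on the architecture version, since ARMv6 and later can handle unaligned accesses in hardware and only older cores benefit from the trap. A small model of the resulting decision (CR_A taken to be the CP15 alignment bit; the helper is hypothetical):

#include <stdio.h>

#define CR_A (1UL << 1)    /* CP15 control register A (alignment) bit */

static unsigned long fixup_cr(unsigned long cr, int arch, int want_trap)
{
    if (want_trap && arch < 6)
        cr |= CR_A;        /* pre-v6: trap unaligned accesses */
    else
        cr &= ~CR_A;       /* v6+: hardware handles them */
    return cr;
}

int main(void)
{
    printf("v5, trap configured: %#lx\n", fixup_cr(0, 5, 1));
    printf("v7, trap configured: %#lx\n", fixup_cr(0, 7, 1));
    return 0;
}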
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 16cedb42c0c3..896165096d6a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
@@ -109,6 +110,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
/* Order is clear bits in "clr" then set bits in "set" */
irq_modify_status(irq, clr, set & ~clr);
}
+EXPORT_SYMBOL_GPL(set_irq_flags);
void __init init_IRQ(void)
{
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index 38c1a3b103a0..839312905067 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -366,7 +366,9 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
+#endif
+#if __LINUX_ARM_ARCH__ >= 7
TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13")
@@ -456,6 +458,8 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */
#if __LINUX_ARM_ARCH__ >= 6
TEST_UNSUPPORTED("ldrex r2, [sp]")
+#endif
+#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K)
TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]")
TEST_UNSUPPORTED("ldrexd r2, r3, [sp]")
TEST_UNSUPPORTED("strexb r0, r2, [sp]")
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e29c3337ca81..8ef8c9337809 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -45,10 +45,9 @@ int machine_kexec_prepare(struct kimage *image)
for (i = 0; i < image->nr_segments; i++) {
current_segment = &image->segment[i];
- err = memblock_is_region_memory(current_segment->mem,
- current_segment->memsz);
- if (err)
- return - EINVAL;
+ if (!memblock_is_region_memory(current_segment->mem,
+ current_segment->memsz))
+ return -EINVAL;
err = get_user(header, (__be32*)current_segment->buf);
if (err)
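The machine_kexec.c change fixes an inverted predicate: memblock_is_region_memory() answers "is this region entirely memory?", so storing its result in err and bailing out when it was non-zero rejected exactly the valid segments. A toy illustration of the bug and the fix:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for memblock_is_region_memory(): true means valid. */
static bool region_is_memory(unsigned long base, unsigned long size)
{
    return base >= 0x80000000UL && base + size <= 0x90000000UL;
}

static int check_old(unsigned long base, unsigned long size)
{
    int err = region_is_memory(base, size);
    if (err)            /* BUG: non-zero means "valid", not "error" */
        return -22;     /* -EINVAL */
    return 0;
}

static int check_new(unsigned long base, unsigned long size)
{
    if (!region_is_memory(base, size))
        return -22;
    return 0;
}

int main(void)
{
    /* A perfectly valid segment: old code rejects it, new code accepts. */
    printf("old: %d, new: %d\n",
           check_old(0x80000000UL, 0x1000),
           check_new(0x80000000UL, 0x1000));
    return 0;
}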
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 93971b1a4f0b..53c0304b734a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -96,6 +96,10 @@ armpmu_event_set_period(struct perf_event *event,
s64 period = hwc->sample_period;
int ret = 0;
+ /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+ if (unlikely(period != hwc->last_period))
+ left = period - (hwc->last_period - left);
+
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
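The perf change makes armpmu_event_set_period() honour a sample period changed mid-flight through PERF_EVENT_IOC_PERIOD: the events already counted against the old period (last_period - left) are carried over into the new one rather than lost. The arithmetic, worked through as a standalone example:

#include <stdio.h>

int main(void)
{
    long long last_period = 1000, left = 400;   /* 600 events counted */
    long long period = 2000;                    /* new period via ioctl */
    long long elapsed = last_period - left;

    if (period != last_period)
        left = period - elapsed;                /* 2000 - 600 = 1400 */

    printf("elapsed=%lld, new left=%lld\n", elapsed, left);
    return 0;
}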
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 90084a6de35a..44bc0b327e2b 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -34,6 +34,7 @@
#include <linux/leds.h>
#include <asm/cacheflush.h>
+#include <asm/idmap.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
@@ -56,8 +57,6 @@ static const char *isa_modes[] = {
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
-extern void setup_mm_for_reboot(void);
-
static volatile int hlt_counter;
void disable_hlt(void)
@@ -70,6 +69,7 @@ EXPORT_SYMBOL(disable_hlt);
void enable_hlt(void)
{
hlt_counter--;
+ BUG_ON(hlt_counter < 0);
}
EXPORT_SYMBOL(enable_hlt);
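disable_hlt() and enable_hlt() maintain a nesting counter, and the new BUG_ON catches an enable_hlt() without a matching disable_hlt(), which would drive the counter negative and silently re-enable hlt for a caller that still expects it off. A minimal model of the check, with assert() standing in for BUG_ON():

#include <assert.h>

static int hlt_counter;

static void disable_hlt(void) { hlt_counter++; }

static void enable_hlt(void)
{
    hlt_counter--;
    assert(hlt_counter >= 0);   /* models BUG_ON(hlt_counter < 0) */
}

int main(void)
{
    disable_hlt();
    enable_hlt();       /* balanced pair: fine */
    /* enable_hlt();    unbalanced: would trip the assert */
    return 0;
}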
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 739db3a1b2d2..03deeffd9f6d 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -916,16 +916,11 @@ enum ptrace_syscall_dir {
PTRACE_SYSCALL_EXIT,
};
-static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
- enum ptrace_syscall_dir dir)
+static int tracehook_report_syscall(struct pt_regs *regs,
+ enum ptrace_syscall_dir dir)
{
unsigned long ip;
- current_thread_info()->syscall = scno;
-
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return scno;
-
/*
* IP is used to denote syscall entry/exit:
* IP = 0 -> entry, =1 -> exit
@@ -944,19 +939,41 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
- scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+ current_thread_info()->syscall = scno;
+
+ /* Do the secure computing check first; failures should be fast. */
+ if (secure_computing(scno) == -1)
+ return -1;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, scno);
+
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
regs->ARM_r2, regs->ARM_r3);
+
return scno;
}
-asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
- scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- trace_sys_exit(regs, scno);
+ /*
+ * Audit the syscall before anything else, as a debugger may
+ * come in and change the current registers.
+ */
audit_syscall_exit(regs);
- return scno;
+
+ /*
+ * Note that we haven't updated the ->syscall field for the
+ * current thread. This isn't a problem because it will have
+ * been set on syscall entry and there hasn't been an opportunity
+ * for a PTRACE_SET_SYSCALL since then.
+ */
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
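syscall_trace_enter() now fixes the ordering explicitly: seccomp runs first so a denied syscall fails fast, then the ptrace hook, which may rewrite the syscall number, then the tracepoint and audit, which therefore observe the final number. On exit, audit runs before the tracer gets a chance to change the registers. A standalone model of the entry ordering (illustrative names, not the kernel API):

#include <stdio.h>

static int secure_computing_model(int scno)
{
    return scno == 999 ? -1 : 0;    /* toy policy: deny syscall 999 */
}

static int ptrace_hook_model(int scno)
{
    return scno;                    /* a tracer could rewrite this */
}

static int trace_enter_model(int scno)
{
    if (secure_computing_model(scno) == -1)
        return -1;                  /* fast failure; nothing else runs */

    scno = ptrace_hook_model(scno);
    printf("tracepoint and audit see scno=%d\n", scno);
    return scno;
}

int main(void)
{
    trace_enter_model(4);       /* allowed, traced, audited */
    trace_enter_model(999);     /* denied before any tracing */
    return 0;
}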
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index e21bac20d90d..fc6692e2b603 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)
update_sched_clock();
}
-void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
- unsigned long rate)
-{
- setup_sched_clock(read, bits, rate);
- cd.needs_suspend = true;
-}
-
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
@@ -189,18 +182,15 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
- if (cd.needs_suspend)
- cd.suspended = true;
+ cd.suspended = true;
return 0;
}
static void sched_clock_resume(void)
{
- if (cd.needs_suspend) {
- cd.epoch_cyc = read_sched_clock();
- cd.epoch_cyc_copy = cd.epoch_cyc;
- cd.suspended = false;
- }
+ cd.epoch_cyc = read_sched_clock();
+ cd.epoch_cyc_copy = cd.epoch_cyc;
+ cd.suspended = false;
}
static struct syscore_ops sched_clock_ops = {
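With the needs_suspend special case removed, every sched_clock user gets the suspend/resume handling: the poll on suspend folds elapsed cycles into the accumulated nanosecond epoch, and resume re-reads the hardware counter as the new cycle epoch, since the counter may have stopped or reset while suspended. A toy model of why that resync keeps the clock monotonic (assuming 1 cycle == 1 ns for simplicity):

#include <stdio.h>

typedef unsigned long long u64;

static u64 counter;                 /* stand-in for read_sched_clock() */
static u64 epoch_ns, epoch_cyc;

static u64 sched_clock_model(void)
{
    return epoch_ns + (counter - epoch_cyc);
}

int main(void)
{
    counter = 5000;                    /* ticking before suspend */
    printf("before suspend: %llu ns\n", sched_clock_model());

    epoch_ns = sched_clock_model();    /* suspend poll banks elapsed time */
    counter = 0;                       /* counter reset while suspended */
    epoch_cyc = counter;               /* resume: re-read the epoch */
    printf("after resume:   %llu ns\n", sched_clock_model());
    return 0;
}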
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8e20754dd31d..57f537731979 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -294,18 +294,24 @@ static void percpu_timer_setup(void);
asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ /*
+ * The identity mapping is uncached (strongly ordered), so
+ * switch away from it before attempting any exclusive accesses.
+ */
+ cpu_switch_mm(mm->pgd, mm);
+ enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
+ cpu = smp_processor_id();
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
- cpu_switch_mm(mm->pgd, mm);
- enter_lazy_tlb(mm, current);
- local_flush_tlb_all();
printk("CPU%u: Booted secondary processor\n", cpu);
@@ -415,6 +421,11 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
smp_cross_call(mask, IPI_CALL_FUNC);
}
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_WAKEUP);
+}
+
void arch_send_call_function_single_ipi(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
@@ -437,7 +448,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
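The smp.c reordering matters because atomic_inc() is built on exclusive load/store instructions, which cannot be relied upon while the secondary CPU is still running on the strongly ordered identity mapping it booted with; cpu_switch_mm() therefore has to happen before the first atomic operation. A toy model of the constraint:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool on_identity_map = true;

static void cpu_switch_mm_model(void) { on_identity_map = false; }

static void atomic_inc_model(int *v)
{
    /* Exclusives are not usable on the strongly ordered identity map. */
    assert(!on_identity_map);
    (*v)++;
}

int main(void)
{
    int mm_count = 1;

    cpu_switch_mm_model();      /* must come first, as in the patch */
    atomic_inc_model(&mm_count);
    printf("mm_count = %d\n", mm_count);
    return 0;
}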
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index e1f906989bb8..ff07879ad95d 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -31,6 +31,8 @@ static void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
+static bool common_setup_called;
+static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu **twd_evt;
static int twd_ppi;
@@ -42,10 +44,10 @@ static void twd_set_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- /* timer load already set up */
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
- __raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
+ __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+ twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* period set, and timer enabled in 'next_event' hook */
@@ -248,17 +250,9 @@ static struct clk *twd_get_clock(void)
return clk;
}
- err = clk_prepare(clk);
+ err = clk_prepare_enable(clk);
if (err) {
- pr_err("smp_twd: clock failed to prepare: %d\n", err);
- clk_put(clk);
- return ERR_PTR(err);
- }
-
- err = clk_enable(clk);
- if (err) {
- pr_err("smp_twd: clock failed to enable: %d\n", err);
- clk_unprepare(clk);
+ pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
clk_put(clk);
return ERR_PTR(err);
}
@@ -272,15 +266,45 @@ static struct clk *twd_get_clock(void)
static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
struct clock_event_device **this_cpu_clk;
+ int cpu = smp_processor_id();
+
+ /*
+ * If the basic setup for this CPU has been done before, don't
+ * bother with the below.
+ */
+ if (per_cpu(percpu_setup_called, cpu)) {
+ __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ clockevents_register_device(*__this_cpu_ptr(twd_evt));
+ enable_percpu_irq(clk->irq, 0);
+ return 0;
+ }
+ per_cpu(percpu_setup_called, cpu) = true;
- if (!twd_clk)
+ /*
+ * This stuff only needs to be done once for the entire TWD cluster
+ * during the runtime of the system.
+ */
+ if (!common_setup_called) {
twd_clk = twd_get_clock();
- if (!IS_ERR_OR_NULL(twd_clk))
- twd_timer_rate = clk_get_rate(twd_clk);
- else
- twd_calibrate_rate();
+ /*
+ * We use IS_ERR_OR_NULL() here because, when the clock stubs
+ * are active, we get back a clk reference that is valid but
+ * NULL and whose rate reads back as 0. In that case we need
+ * to calibrate the rate instead.
+ */
+ if (!IS_ERR_OR_NULL(twd_clk))
+ twd_timer_rate = clk_get_rate(twd_clk);
+ else
+ twd_calibrate_rate();
+
+ common_setup_called = true;
+ }
+ /*
+ * The following is done once per CPU the first time .setup() is
+ * called.
+ */
__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";
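twd_timer_setup() now distinguishes three cases: cluster-wide clock setup that runs once ever, first-time per-CPU setup, and a short re-registration path for a CPU that is hotplugged back in. A standalone model of the two-level guard:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool common_setup_called;
static bool percpu_setup_called[NR_CPUS];

static void timer_setup(int cpu)
{
    if (percpu_setup_called[cpu]) {
        printf("cpu%d: hotplugged back in, re-register only\n", cpu);
        return;
    }
    percpu_setup_called[cpu] = true;

    if (!common_setup_called) {
        printf("cpu%d: one-time cluster-wide clock setup\n", cpu);
        common_setup_called = true;
    }
    printf("cpu%d: first-time per-cpu setup\n", cpu);
}

int main(void)
{
    timer_setup(0);
    timer_setup(1);
    timer_setup(1);     /* simulated hotplug: offline, then back on */
    return 0;
}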
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 36ff15bbfdd4..b9f38e388b43 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -114,6 +114,15 @@ SECTIONS
RO_DATA(PAGE_SIZE)
+ . = ALIGN(4);
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ __start___ex_table = .;
+#ifdef CONFIG_MMU
+ *(__ex_table)
+#endif
+ __stop___ex_table = .;
+ }
+
#ifdef CONFIG_ARM_UNWIND
/*
* Stack unwinding tables
@@ -220,16 +229,6 @@ SECTIONS
READ_MOSTLY_DATA(L1_CACHE_BYTES)
/*
- * The exception fixup table (might need resorting at runtime)
- */
- . = ALIGN(4);
- __start___ex_table = .;
-#ifdef CONFIG_MMU
- *(__ex_table)
-#endif
- __stop___ex_table = .;
-
- /*
* and the usual data section
*/
DATA_DATA