Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/bugs.c             |  4
-rw-r--r--  arch/arm/kernel/entry-common.S     |  4
-rw-r--r--  arch/arm/kernel/head-common.S      |  6
-rw-r--r--  arch/arm/kernel/irq.c              | 62
-rw-r--r--  arch/arm/kernel/setup.c            | 40
-rw-r--r--  arch/arm/kernel/signal.c           | 70
-rw-r--r--  arch/arm/kernel/smp.c              | 49
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c  |  8
8 files changed, 125 insertions, 118 deletions
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 7be511310191..d41d3598e5e5 100644
--- a/arch/arm/kernel/bugs.c
+++ b/arch/arm/kernel/bugs.c
@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-	if (processor.check_bugs)
-		processor.check_bugs();
+	if (cpu_check_bugs)
+		cpu_check_bugs();
 #endif
 }
 
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 56be67ecf0fa..d69adfb3d79e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -32,6 +32,7 @@
  * features make this path too inefficient.
  */
 ret_fast_syscall:
+__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
 	disable_irq_notrace			@ disable interrupts
@@ -57,6 +58,7 @@ fast_work_pending:
  * r0 first to avoid needing to save registers around each C function call.
  */
 ret_fast_syscall:
+__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
@@ -223,7 +225,7 @@ local_restart:
 	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
 	bne	__sys_trace
 
-	invoke_syscall tbl, scno, r10, ret_fast_syscall
+	invoke_syscall tbl, scno, r10, __ret_fast_syscall
 
 	add	r1, sp, #S_OFF
 2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 8733012d231f..7e662bdd5cb3 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -122,6 +122,9 @@ __mmap_switched_data:
 	.long	init_thread_union + THREAD_START_SP	@ sp
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
+	__FINIT
+	.text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -133,9 +136,6 @@ ENTRY(lookup_processor_type)
 	ldmfd	sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-	__FINIT
-	.text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..5b07c7a31c31 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -119,64 +118,3 @@ int __init arch_probe_nr_irqs(void)
 	return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-	struct irq_chip *c;
-	bool ret = false;
-
-	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
-	 */
-	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-		return false;
-
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
-		ret = true;
-	}
-
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-	return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-	unsigned int i;
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	for_each_irq_desc(i, desc) {
-		bool affinity_broken;
-
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
-
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-				i, smp_processor_id());
-	}
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f4e54503afa9..4764742db7b0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+	[0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif
 
-static void __init setup_processor(void)
+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
 {
-	struct proc_info_list *list;
+	struct proc_info_list *list = lookup_processor_type(midr);
 
-	/*
-	 * locate processor in the list of supported processor
-	 * types.  The linker builds this table for us from the
-	 * entries in arch/arm/mm/proc-*.S
-	 */
-	list = lookup_processor_type(read_cpuid_id());
 	if (!list) {
-		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-		       read_cpuid_id());
-		while (1);
+		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+		       smp_processor_id(), midr);
+		while (1)
+		/* can't use cpu_relax() here as it may require MMU setup */;
 	}
 
+	return list;
+}
+
+static void __init setup_processor(void)
+{
+	unsigned int midr = read_cpuid_id();
+	struct proc_info_list *list = lookup_processor(midr);
+
 	cpu_name = list->cpu_name;
 	__cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-	processor = *list->proc;
-#endif
+	init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
 	cpu_tlb = *list->tlb;
 #endif
@@ -700,7 +710,7 @@ static void __init setup_processor(void)
 #endif
 
 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+		list->cpu_name, midr, midr & 15,
 		proc_arch[cpu_architecture()], get_cr());
 
 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 6bee5c9b1133..0a066f03b5ec 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -94,17 +94,18 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	const unsigned long magic = VFP_MAGIC;
-	const unsigned long size = VFP_STORAGE_SIZE;
+	struct vfp_sigframe kframe;
 	int err = 0;
 
-	__put_user_error(magic, &frame->magic, err);
-	__put_user_error(size, &frame->size, err);
+	memset(&kframe, 0, sizeof(kframe));
+	kframe.magic = VFP_MAGIC;
+	kframe.size = VFP_STORAGE_SIZE;
 
+	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
 	if (err)
-		return -EFAULT;
+		return err;
 
-	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+	return __copy_to_user(frame, &kframe, sizeof(kframe));
 }
 
 static int restore_vfp_context(struct vfp_sigframe __user *auxp)
@@ -256,30 +257,35 @@ static int
 setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 {
 	struct aux_sigframe __user *aux;
+	struct sigcontext context;
 	int err = 0;
 
-	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
-	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
-	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
-	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
-	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
-	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
-	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
-	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
-	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
-	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
-	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
-	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
-	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
-	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
-	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
-	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
-	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
-
-	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
-	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
-	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
-	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+	context = (struct sigcontext) {
+		.arm_r0        = regs->ARM_r0,
+		.arm_r1        = regs->ARM_r1,
+		.arm_r2        = regs->ARM_r2,
+		.arm_r3        = regs->ARM_r3,
+		.arm_r4        = regs->ARM_r4,
+		.arm_r5        = regs->ARM_r5,
+		.arm_r6        = regs->ARM_r6,
+		.arm_r7        = regs->ARM_r7,
+		.arm_r8        = regs->ARM_r8,
+		.arm_r9        = regs->ARM_r9,
+		.arm_r10       = regs->ARM_r10,
+		.arm_fp        = regs->ARM_fp,
+		.arm_ip        = regs->ARM_ip,
+		.arm_sp        = regs->ARM_sp,
+		.arm_lr        = regs->ARM_lr,
+		.arm_pc        = regs->ARM_pc,
+		.arm_cpsr      = regs->ARM_cpsr,
+
+		.trap_no       = current->thread.trap_no,
+		.error_code    = current->thread.error_code,
+		.fault_address = current->thread.address,
+		.oldmask       = set->sig[0],
+	};
+
+	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
 
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
@@ -296,7 +302,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 	if (err == 0)
 		err |= preserve_vfp_context(&aux->vfp);
 #endif
-	__put_user_error(0, &aux->end_magic, err);
+	err |= __put_user(0, &aux->end_magic);
 
 	return err;
 }
@@ -428,7 +434,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	/*
 	 * Set uc.uc_flags to a value which sc.trap_no would never have.
 	 */
-	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
 
 	err |= setup_sigframe(frame, regs, set);
 	if (err == 0)
@@ -448,8 +454,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 
 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 
-	__put_user_error(0, &frame->sig.uc.uc_flags, err);
-	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
+	err |= __put_user(0, &frame->sig.uc.uc_flags);
+	err |= __put_user(NULL, &frame->sig.uc.uc_link);
 
 	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
 	err |= setup_sigframe(&frame->sig, regs, set);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d2ce37da87d8..bc83ec7ed53f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -27,6 +27,7 @@
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
 #include <linux/irq_work.h>
+#include <linux/slab.h>
 
 #include <linux/atomic.h>
 #include <asm/bugs.h>
@@ -40,6 +41,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -100,6 +102,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	if (!cpu_vtable[cpu])
+		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+	return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -107,6 +133,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (!smp_ops.smp_boot_secondary)
 		return -ENOSYS;
 
+	ret = secondary_biglittle_prepare(cpu);
+	if (ret)
+		return ret;
+
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
@@ -223,7 +253,7 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -358,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
 
+	secondary_biglittle_init();
+
 	/*
 	 * The identity mapping is uncached (strongly ordered), so
 	 * switch away from it before attempting any exclusive accesses.
@@ -690,6 +722,21 @@ void smp_send_stop(void)
 		pr_warn("SMP: failed to stop secondary CPUs\n");
 }
 
+/* In case panic() and panic() called at the same time on CPU1 and CPU2,
+ * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
+ * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
+ * kdump fails. So split out the panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+		 smp_processor_id());
+	set_cpu_online(smp_processor_id(), false);
+	while (1)
+		cpu_relax();
+}
+
 /*
  * not supported here
  */
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 640748e27035..d844c5c9364b 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -276,6 +276,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 				    int maxevents, int timeout)
 {
 	struct epoll_event *kbuf;
+	struct oabi_epoll_event e;
 	mm_segment_t fs;
 	long ret, err, i;
 
@@ -294,8 +295,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
 	set_fs(fs);
 	err = 0;
 	for (i = 0; i < ret; i++) {
-		__put_user_error(kbuf[i].events, &events->events, err);
-		__put_user_error(kbuf[i].data, &events->data, err);
+		e.events = kbuf[i].events;
+		e.data = kbuf[i].data;
+		err = __copy_to_user(events, &e, sizeof(e));
+		if (err)
+			break;
 		events++;
 	}
 	kfree(kbuf);