Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r-- | arch/arm64/kernel/armv8_deprecated.c | 18
-rw-r--r-- | arch/arm64/kernel/cpuidle.c          |  4
-rw-r--r-- | arch/arm64/kernel/psci.c             |  2
-rw-r--r-- | arch/arm64/kernel/stacktrace.c       |  6
-rw-r--r-- | arch/arm64/kernel/suspend.c          | 26
-rw-r--r-- | arch/arm64/kernel/vmlinux.lds.S      |  5
6 files changed, 33 insertions, 28 deletions
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 7922c2e710ca..7ac3920b1356 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -279,22 +279,24 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
  */
 #define __user_swpX_asm(data, addr, res, temp, B)	\
 	__asm__ __volatile__(				\
-	"	mov		%w2, %w1\n"		\
-	"0:	ldxr"B"		%w1, [%3]\n"		\
-	"1:	stxr"B"		%w0, %w2, [%3]\n"	\
+	"0:	ldxr"B"		%w2, [%3]\n"		\
+	"1:	stxr"B"		%w0, %w1, [%3]\n"	\
 	"	cbz		%w0, 2f\n"		\
 	"	mov		%w0, %w4\n"		\
+	"	b		3f\n"			\
 	"2:\n"						\
+	"	mov		%w1, %w2\n"		\
+	"3:\n"						\
 	"	.pushsection	 .fixup,\"ax\"\n"	\
 	"	.align		2\n"			\
-	"3:	mov		%w0, %w5\n"		\
-	"	b		2b\n"			\
+	"4:	mov		%w0, %w5\n"		\
+	"	b		3b\n"			\
 	"	.popsection"				\
 	"	.pushsection	 __ex_table,\"a\"\n"	\
 	"	.align		3\n"			\
-	"	.quad		0b, 3b\n"		\
-	"	.quad		1b, 3b\n"		\
-	"	.popsection"				\
+	"	.quad		0b, 4b\n"		\
+	"	.quad		1b, 4b\n"		\
+	"	.popsection\n"				\
 	: "=&r" (res), "+r" (data), "=&r" (temp)	\
 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)	\
 	: "memory")
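The hunk above reorders the SWP emulation so the "data" register is only overwritten once the store-exclusive has succeeded: the old sequence did "mov %w2, %w1" up front and loaded the memory value straight into %w1, so a fault or an -EAGAIN retry could observe a clobbered register and store the wrong value on the next attempt. A minimal user-space C sketch of the fixed control flow follows; load_exclusive(), store_exclusive() and swp_once() are hypothetical stand-ins (plain C has no LL/SC primitives), so only the ordering is meaningful, not the exclusive-monitor semantics.

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for ldxr/stxr: these model only the
	 * control flow, not real load/store-exclusive behaviour. */
	static unsigned int load_exclusive(unsigned int *addr)
	{
		return *addr;
	}

	static int store_exclusive(unsigned int val, unsigned int *addr)
	{
		*addr = val;
		return 0;	/* 0 = exclusive store succeeded */
	}

	/* One SWP attempt, mirroring the fixed asm: the old value goes
	 * into a temporary first ("0: ldxr ... %w2") and is committed to
	 * *data only after the store succeeds ("2: mov %w1, %w2"). */
	static int swp_once(unsigned int *uaddr, unsigned int *data)
	{
		unsigned int old = load_exclusive(uaddr);	/* "0:" */

		if (store_exclusive(*data, uaddr))		/* "1:" */
			return -EAGAIN;	/* retry with *data still intact */

		*data = old;	/* success: return the swapped-out value */
		return 0;
	}

	int main(void)
	{
		unsigned int mem = 0x11, reg = 0x22;

		if (swp_once(&mem, &reg) == 0)
			printf("mem=%#x reg=%#x\n", mem, reg);	/* 0x22, 0x11 */
		return 0;
	}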
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index a78143a5c99f..2bbd0fee084f 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -37,7 +37,7 @@ int arm_cpuidle_init(unsigned int cpu)
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
  * operations back-end error code otherwise.
  */
-int cpu_suspend(unsigned long arg)
+int arm_cpuidle_suspend(int index)
 {
 	int cpu = smp_processor_id();
 
@@ -47,5 +47,5 @@ int cpu_suspend(unsigned long arg)
 	 */
 	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
 		return -EOPNOTSUPP;
-	return cpu_ops[cpu]->cpu_suspend(arg);
+	return cpu_ops[cpu]->cpu_suspend(index);
 }
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ea18cb53921e..24d4733b7e3c 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -546,7 +546,7 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
 	if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
 		ret = psci_ops.cpu_suspend(state[index - 1], 0);
 	else
-		ret = __cpu_suspend(index, psci_suspend_finisher);
+		ret = cpu_suspend(index, psci_suspend_finisher);
 
 	return ret;
 }
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 407991bf79f5..ccb6078ed9f2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
 
 	frame->sp = fp + 0x10;
 	frame->fp = *(unsigned long *)(fp);
-	/*
-	 * -4 here because we care about the PC at time of bl,
-	 * not where the return will go.
-	 */
-	frame->pc = *(unsigned long *)(fp + 8) - 4;
+	frame->pc = *(unsigned long *)(fp + 8);
 
 	return 0;
 }
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7daf45ae7a2..53f1f8dccf6c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 }
 
 /*
- * __cpu_suspend
+ * cpu_suspend
  *
  * arg: argument to pass to the finisher function
  * fn: finisher function pointer
  *
  */
-int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
 	struct mm_struct *mm = current->active_mm;
 	int ret;
@@ -80,17 +80,21 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	if (ret == 0) {
 		/*
 		 * We are resuming from reset with TTBR0_EL1 set to the
-		 * idmap to enable the MMU; restore the active_mm mappings in
-		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
-		 * the thread entered __cpu_suspend with TTBR0_EL1 set to
-		 * reserved TTBR0 page tables and should be restored as such.
+		 * idmap to enable the MMU; set the TTBR0 to the reserved
+		 * page tables to prevent speculative TLB allocations, flush
+		 * the local tlb and set the default tcr_el1.t0sz so that
+		 * the TTBR0 address space set-up is properly restored.
+		 * If the current active_mm != &init_mm we entered cpu_suspend
+		 * with mappings in TTBR0 that must be restored, so we switch
+		 * them back to complete the address space configuration
+		 * restoration before returning.
 		 */
-		if (mm == &init_mm)
-			cpu_set_reserved_ttbr0();
-		else
-			cpu_switch_mm(mm->pgd, mm);
-
+		cpu_set_reserved_ttbr0();
 		flush_tlb_all();
+		cpu_set_default_tcr_t0sz();
+
+		if (mm != &init_mm)
+			cpu_switch_mm(mm->pgd, mm);
 
 		/*
 		 * Restore per-cpu offset before any kernel
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a2c29865c3fe..aff07bcad882 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -54,9 +54,12 @@ PECOFF_FILE_ALIGNMENT = 0x200;
 #define PECOFF_EDATA_PADDING
 #endif
 
-#ifdef CONFIG_DEBUG_ALIGN_RODATA
+#if defined(CONFIG_DEBUG_ALIGN_RODATA)
 #define ALIGN_DEBUG_RO			. = ALIGN(1<<SECTION_SHIFT);
 #define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
+#elif defined(CONFIG_DEBUG_RODATA)
+#define ALIGN_DEBUG_RO			. = ALIGN(1<<PAGE_SHIFT);
+#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
 #else
 #define ALIGN_DEBUG_RO
 #define ALIGN_DEBUG_RO_MIN(min)	. = ALIGN(min);
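Read as straight-line C, the reordered warm-boot path in the suspend.c hunk above amounts to the sketch below, built from the kernel helpers named in the diff. This is kernel-internal code, not a standalone program, and the wrapper name resume_switch_address_space is illustrative only; the point is the ordering: TTBR0 is parked on the reserved (empty) tables before the idmap TLB entries are flushed and t0sz is restored, so no speculative walk can refill the TLB from the idmap in between.

	/* Sketch of the tail of cpu_suspend() after resume (kernel context). */
	static void resume_switch_address_space(struct mm_struct *mm)
	{
		cpu_set_reserved_ttbr0();	/* no TTBR0 walks possible from here */
		flush_tlb_all();		/* drop stale idmap TLB entries */
		cpu_set_default_tcr_t0sz();	/* restore the runtime TTBR0 VA size */

		if (mm != &init_mm)		/* init_mm stays on the reserved tables */
			cpu_switch_mm(mm->pgd, mm);
	}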