From 7a292b6c7c9c35afee01ce3b2248f705869d0ff1 Mon Sep 17 00:00:00 2001
From: Thierry Reding
Date: Mon, 23 Sep 2019 11:12:29 +0200
Subject: arm64: errata: Update stale comment

Commit 73f381660959 ("arm64: Advertise mitigation of Spectre-v2, or lack
thereof") renamed the caller of the install_bp_hardening_cb() function but
forgot to update a comment, which can be confusing when trying to follow
the code flow.

Fixes: 73f381660959 ("arm64: Advertise mitigation of Spectre-v2, or lack thereof")
Signed-off-by: Thierry Reding
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpu_errata.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 1e43ba5c79b7..f593f4cffc0d 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -128,8 +128,8 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 	int cpu, slot = -1;

 	/*
-	 * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
-	 * start/end if we're a guest. Skip the hyp-vectors work.
+	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+	 * we're a guest. Skip the hyp-vectors work.
	 */
 	if (!hyp_vecs_start) {
 		__this_cpu_write(bp_hardening_data.fn, fn);
--
cgit v1.2.3

From 4585fc59c0e813188d6a4c5de1f6976fce461fc2 Mon Sep 17 00:00:00 2001
From: Masayoshi Mizuma
Date: Mon, 30 Sep 2019 16:56:00 -0400
Subject: arm64/sve: Fix wrong free for task->thread.sve_state

A system with the SVE feature crashed because the memory pointed to by
task->thread.sve_state had been corrupted: sve_state can be freed while
forking a child process.

The child process has the same sve_state pointer as the parent, because
the child's task_struct is copied from the parent's. If copy_process()
then fails somewhere, for example in copy_creds(), the sve_state is freed
even though the parent is still alive. The flow is as follows.

  copy_process
    p = dup_task_struct
      => arch_dup_task_struct
         *dst = *src;  // copy the entire region
    :
    retval = copy_creds
    if (retval < 0)
      goto bad_fork_free;
    :
  bad_fork_free:
  ...
    delayed_free_task(p);
      => free_task
        => arch_release_task_struct
          => fpsimd_release_task
            => __sve_free
              => kfree(task->thread.sve_state);
                 // frees the parent's sve_state

Move the child's sve_state = NULL assignment and the clearing of the
TIF_SVE flag into arch_dup_task_struct() so that the child cannot free
the parent's sve_state. There is no need to wait until copy_process() to
clear TIF_SVE for dst, because the thread flags for dst are already
initialized by copying the src task_struct. This change also simplifies
the code, so remove the comments that are no longer needed.

As a note, arm64 used to keep thread_info on the stack, so it was not
possible to clear TIF_SVE until the stack was initialized. Since commit
c02433dd6de3 ("arm64: split thread_info from task stack"), thread_info is
part of the task_struct, so it is valid to modify the flag from
arch_dup_task_struct().
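To make the failure mode concrete, here is a minimal userspace analogue of the
bug (a sketch, not kernel code; the struct and function names are invented for
illustration): a shallow struct copy aliases a heap pointer, and an error path
then frees a buffer the original still owns.

/* Userspace analogue of the aliased-pointer bug; names are illustrative. */
#include <stdlib.h>

struct task {
	char *sve_state;		/* heap buffer, like thread.sve_state */
};

static struct task *dup_task(const struct task *src)
{
	struct task *dst = malloc(sizeof(*dst));

	if (!dst)
		return NULL;
	*dst = *src;			/* shallow copy: aliases src->sve_state */
	dst->sve_state = NULL;		/* the fix: detach the alias right here */
	return dst;
}

static void release_task(struct task *t)
{
	free(t->sve_state);		/* without the fix, frees the parent's buffer */
	free(t);
}

int main(void)
{
	struct task parent = { .sve_state = malloc(512) };
	struct task *child = dup_task(&parent);

	if (child)
		release_task(child);	/* e.g. a later copy_process() step failed */
	if (parent.sve_state)
		parent.sve_state[0] = 1;	/* still safe, thanks to the detach */
	free(parent.sve_state);
	return 0;
}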
Cc: stable@vger.kernel.org # 4.15.x-
Fixes: bc0ee4760364 ("arm64/sve: Core task context handling")
Signed-off-by: Masayoshi Mizuma
Reported-by: Hidetoshi Seto
Suggested-by: Dave Martin
Reviewed-by: Dave Martin
Tested-by: Julien Grall
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/process.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a47462def04b..1fb2819fc048 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -332,22 +332,27 @@ void arch_release_task_struct(struct task_struct *tsk)
 	fpsimd_release_task(tsk);
 }

-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied. We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead. This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	if (current->mm)
 		fpsimd_preserve_current_state();
 	*dst = *src;
+	/* We rely on the above assignment to initialize dst's thread_flags: */
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+	/*
+	 * Detach src's sve_state (if any) from dst so that it does not
+	 * get erroneously used or freed prematurely. dst's sve_state
+	 * will be allocated on demand later on if dst uses SVE.
+	 * For consistency, also clear TIF_SVE here: this could be done
+	 * later in copy_process(), but to avoid tripping up future
+	 * maintainers it is best not to leave TIF_SVE and sve_state in
+	 * an inconsistent state, even temporarily.
+	 */
+	dst->thread.sve_state = NULL;
+	clear_tsk_thread_flag(dst, TIF_SVE);
+
 	return 0;
 }

@@ -360,13 +365,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,

 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

-	/*
-	 * Unalias p->thread.sve_state (if any) from the parent task
-	 * and disable discard SVE state for p:
-	 */
-	clear_tsk_thread_flag(p, TIF_SVE);
-	p->thread.sve_state = NULL;
-
 	/*
 	 * In case p was allocated the same task_struct pointer as some
 	 * other recently-exited task, make sure p is disassociated from
--
cgit v1.2.3

From 7230f7e99fecc684180322b056fad3853d1029d3 Mon Sep 17 00:00:00 2001
From: Julien Grall
Date: Thu, 3 Oct 2019 12:12:08 +0100
Subject: arm64: cpufeature: Effectively expose FRINT capability to userspace

The HWCAP framework will detect a new capability based on the sanitized
version of the ID registers. Sanitization is based on a whitelist, so any
field not described will end up being zeroed.

At the moment, ID_AA64ISAR1_EL1.FRINTTS is not described in
ftr_id_aa64isar1. This means the field will be zeroed and therefore
userspace will not be able to see the HWCAP even if the hardware supports
the feature.

This can be fixed by describing the field in ftr_id_aa64isar1.
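For reference, this is how userspace would observe the fix. A small sketch,
assuming kernel headers and a libc new enough to provide HWCAP2_FRINT and
AT_HWCAP2:

/* Query the FRINT hwcap from userspace on arm64 (sketch). */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* Before this fix, the bit stayed clear even on FRINT-capable CPUs. */
	printf("FRINT %ssupported\n", (hwcap2 & HWCAP2_FRINT) ? "" : "not ");
	return 0;
}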
Fixes: ca9503fc9e98 ("arm64: Expose FRINT capabilities to userspace")
Signed-off-by: Julien Grall
Cc: mark.brown@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpufeature.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9323bcc40a58..cabebf1a7976 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -136,6 +136,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {

 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
--
cgit v1.2.3

From f46f27a576cc3b1e3d45ea50bc06287aa46b04b2 Mon Sep 17 00:00:00 2001
From: James Morse
Date: Thu, 3 Oct 2019 18:01:27 +0100
Subject: arm64: Fix incorrect irqflag restore for priority masking for compat

Commit bd82d4bd2188 ("arm64: Fix incorrect irqflag restore for priority
masking") added a macro to the entry.S call paths that leave the PSTATE.I
bit set. This tells the pNMI masking logic that interrupts are masked by
the CPU, not by the PMR. This value is read back by local_daif_save().

Commit bd82d4bd2188 added this call to el0_svc, as el0_svc_handler is
called with interrupts masked. el0_svc_compat was missed, but should be
covered in the same way, as both of these paths end up in
el0_svc_common(), which expects to unmask interrupts.

Fixes: bd82d4bd2188 ("arm64: Fix incorrect irqflag restore for priority masking")
Signed-off-by: James Morse
Cc: Julien Thierry
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/entry.S | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 84a822748c84..e304fe04b098 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -775,6 +775,7 @@ el0_sync_compat:
 	b.ge	el0_dbg
 	b	el0_inv
 el0_svc_compat:
+	gic_prio_kentry_setup tmp=x1
 	mov	x0, sp
 	bl	el0_svc_compat_handler
 	b	ret_to_user
--
cgit v1.2.3

From dd8a1f13488438c6c220b7cafa500baaf21a6e53 Mon Sep 17 00:00:00 2001
From: James Morse
Date: Wed, 2 Oct 2019 10:49:35 +0100
Subject: arm64: ftrace: Ensure synchronisation in PLT setup for Neoverse-N1 #1542419

CPUs affected by Neoverse-N1 #1542419 may execute a stale instruction if
it was recently modified. The affected sequence requires freshly written
instructions to be executable before a branch to them is updated.

There are very few places in the kernel that modify executable text, and
all but one come with sufficient synchronisation:
 * The module loader's flush_module_icache() calls flush_icache_range(),
   which does a kick_all_cpus_sync().
 * bpf_int_jit_compile() calls flush_icache_range().
 * Kprobes calls aarch64_insn_patch_text(), which does its work in
   stop_machine().
 * static keys and ftrace both patch between nops and branches to
   existing kernel code (not generated code).

The affected sequence is the interaction between ftrace and modules. The
module PLT is cleaned using __flush_icache_range(), as the trampoline
shouldn't be executable until we update the branch to it.

Drop the double-underscore so that this path runs kick_all_cpus_sync()
too.
Signed-off-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/kernel/ftrace.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 171773257974..06e56b470315 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -121,10 +121,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) /* * Ensure updated trampoline is visible to instruction - * fetch before we patch in the branch. + * fetch before we patch in the branch. Although the + * architecture doesn't require an IPI in this case, + * Neoverse-N1 erratum #1542419 does require one + * if the TLB maintenance in module_enable_ro() is + * skipped due to rodata_enabled. It doesn't seem worth + * it to make it conditional given that this is + * certainly not a fast-path. */ - __flush_icache_range((unsigned long)&dst[0], - (unsigned long)&dst[1]); + flush_icache_range((unsigned long)&dst[0], + (unsigned long)&dst[1]); } addr = (unsigned long)dst; #else /* CONFIG_ARM64_MODULE_PLTS */ -- cgit v1.2.3 From e0de01aafc3dd7b73308106b056ead2d48391905 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Thu, 3 Oct 2019 18:48:33 +0100 Subject: arm64: vdso32: Fix broken compat vDSO build warnings The .config file and the generated include/config/auto.conf can end up out of sync after a set of commands since CONFIG_CROSS_COMPILE_COMPAT_VDSO is not updated correctly. The sequence can be reproduced as follows: $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- defconfig [...] $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- menuconfig [set CONFIG_CROSS_COMPILE_COMPAT_VDSO="arm-linux-gnueabihf-"] $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- Which results in: arch/arm64/Makefile:62: CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built even though the compat vDSO has been built: $ file arch/arm64/kernel/vdso32/vdso.so arch/arm64/kernel/vdso32/vdso.so: ELF 32-bit LSB pie executable, ARM, EABI5 version 1 (SYSV), dynamically linked, BuildID[sha1]=c67f6c786f2d2d6f86c71f708595594aa25247f6, stripped A similar case that involves changing the configuration parameter multiple times can be reconducted to the same family of problems. Remove the use of CONFIG_CROSS_COMPILE_COMPAT_VDSO altogether and instead rely on the cross-compiler prefix coming from the environment via CROSS_COMPILE_COMPAT, much like we do for the rest of the kernel. 
Cc: Will Deacon Cc: Catalin Marinas Reported-by: Will Deacon Signed-off-by: Vincenzo Frascino Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso32/Makefile | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 1fba0776ed40..19e0d3115ffe 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -8,8 +8,6 @@ ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32 include $(srctree)/lib/vdso/Makefile -COMPATCC := $(CROSS_COMPILE_COMPAT)gcc - # Same as cc-*option, but using COMPATCC instead of CC cc32-option = $(call try-run,\ $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) -- cgit v1.2.3 From 37a5076098c17f40913c772b017ff8e48e449656 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Thu, 3 Oct 2019 18:48:35 +0100 Subject: arm64: vdso: Remove stale files from old assembly implementation Moving over to the generic C implementation of the vDSO inadvertently left some stale files behind which are no longer used. Remove them. Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Vincenzo Frascino Acked-by: Catalin Marinas Tested-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso/gettimeofday.S | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 arch/arm64/kernel/vdso/gettimeofday.S (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S deleted file mode 100644 index e69de29bb2d1..000000000000 -- cgit v1.2.3 From 0df2c90eba60791148cee1823c0bf5fc66e3465c Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Thu, 3 Oct 2019 18:48:34 +0100 Subject: arm64: vdso32: Detect binutils support for dmb ishld Older versions of binutils (prior to 2.24) do not support the "ISHLD" option for memory barrier instructions, which leads to a build failure when assembling the vdso32 library. Add a compilation time mechanism that detects if binutils supports those instructions and configure the kernel accordingly. Cc: Will Deacon Cc: Catalin Marinas Reported-by: Will Deacon Signed-off-by: Vincenzo Frascino Reviewed-by: Catalin Marinas Tested-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso32/Makefile | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 19e0d3115ffe..77aa61340374 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -15,6 +15,8 @@ cc32-disable-warning = $(call try-run,\ $(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) cc32-ldoption = $(call try-run,\ $(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) +cc32-as-instr = $(call try-run,\ + printf "%b\n" "$(1)" | $(COMPATCC) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) # We cannot use the global flags to compile the vDSO files, the main reason # being that the 32-bit compiler may be older than the main (64-bit) compiler @@ -53,6 +55,7 @@ endif VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING + # Try to compile for ARMv8. If the compiler is too old and doesn't support it, # fall back to v7. There is no easy way to check for what architecture the code # is being compiled, so define a macro specifying that (see arch/arm/Makefile). 
@@ -89,6 +92,12 @@ VDSO_CFLAGS += -Wno-int-to-pointer-cast VDSO_AFLAGS := $(VDSO_CAFLAGS) VDSO_AFLAGS += -D__ASSEMBLY__ +# Check for binutils support for dmb ishld +dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1) + +VDSO_CFLAGS += $(dmbinstr) +VDSO_AFLAGS += $(dmbinstr) + VDSO_LDFLAGS := $(VDSO_CPPFLAGS) # From arm vDSO Makefile VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 -- cgit v1.2.3 From a7f93103f86e2bbc5646831707d2fa565315004d Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Thu, 3 Oct 2019 18:48:36 +0100 Subject: arm64: vdso32: Remove jump label config option in Makefile The jump labels are not used in vdso32 since it is not possible to run runtime patching on them. Remove the configuration option from the Makefile. Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Vincenzo Frascino Acked-by: Catalin Marinas Tested-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso32/Makefile | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 77aa61340374..038357a1e835 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -38,9 +38,6 @@ VDSO_CAFLAGS += $(call cc32-option,-fno-PIE) ifdef CONFIG_DEBUG_INFO VDSO_CAFLAGS += -g endif -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(COMPATCC)), y) -VDSO_CAFLAGS += -DCC_HAVE_ASM_GOTO -endif # From arm Makefile VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm) -- cgit v1.2.3 From bcaf9b57e4884e86717c1f4cee8157fd68189aa7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 4 Oct 2019 15:43:53 +0100 Subject: arm64: vdso32: Move definition of COMPATCC into vdso32/Makefile There's no need to export COMPATCC, so just define it locally in the vdso32/Makefile, which is the only place where it is used. Acked-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso32/Makefile | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 038357a1e835..f52d29027d8d 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -9,6 +9,12 @@ ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32 include $(srctree)/lib/vdso/Makefile # Same as cc-*option, but using COMPATCC instead of CC +ifeq ($(CONFIG_CC_IS_CLANG), y) +COMPATCC ?= $(CC) --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) +else +COMPATCC ?= $(CROSS_COMPILE_COMPAT)gcc +endif + cc32-option = $(call try-run,\ $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) cc32-disable-warning = $(call try-run,\ -- cgit v1.2.3 From c71e88c437962c1ec43d4d23a0ebf4c9cf9bee0d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 4 Oct 2019 15:44:45 +0100 Subject: arm64: vdso32: Don't use KBUILD_CPPFLAGS unconditionally KBUILD_CPPFLAGS is defined differently depending on whether the main compiler is clang or not. This means that it is not possible to build the compat vDSO with GCC if the rest of the kernel is built with clang. 
Define VDSO_CPPFLAGS directly to break this dependency and allow a clang kernel to build a compat vDSO with GCC: $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- \ CROSS_COMPILE_COMPAT=arm-linux-gnueabihf- CC=clang \ COMPATCC=arm-linux-gnueabihf-gcc Acked-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso32/Makefile | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index f52d29027d8d..7de96a6a56f9 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -31,11 +31,9 @@ cc32-as-instr = $(call try-run,\ # arm64 one. # As a result we set our own flags here. -# From top-level Makefile -# NOSTDINC_FLAGS -VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include) +# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile +VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include) VDSO_CPPFLAGS += $(LINUXINCLUDE) -VDSO_CPPFLAGS += $(KBUILD_CPPFLAGS) # Common C and assembly flags # From top-level Makefile -- cgit v1.2.3 From 7424ee2b1617de62c3761bdd6260857363e1e4d4 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 7 Oct 2019 12:27:59 +0100 Subject: arm64: vdso32: Pass '--target' option to clang via VDSO_CAFLAGS Directly passing the '--target' option to clang by appending to COMPATCC does not work if COMPATCC has been specified explicitly as an argument to Make unless the 'override' directive is used, which is ugly and different to what is done in the top-level Makefile. Move the '--target' option for clang out of COMPATCC and into VDSO_CAFLAGS, where it will be picked up when compiling and assembling the 32-bit vDSO under clang. Reported-by: Catalin Marinas Acked-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso32/Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 7de96a6a56f9..60e19a3fc109 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -10,7 +10,7 @@ include $(srctree)/lib/vdso/Makefile # Same as cc-*option, but using COMPATCC instead of CC ifeq ($(CONFIG_CC_IS_CLANG), y) -COMPATCC ?= $(CC) --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) +COMPATCC ?= $(CC) else COMPATCC ?= $(CROSS_COMPILE_COMPAT)gcc endif @@ -38,6 +38,10 @@ VDSO_CPPFLAGS += $(LINUXINCLUDE) # Common C and assembly flags # From top-level Makefile VDSO_CAFLAGS := $(VDSO_CPPFLAGS) +ifneq ($(shell $(COMPATCC) --version 2>&1 | head -n 1 | grep clang),) +VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) +endif + VDSO_CAFLAGS += $(call cc32-option,-fno-PIE) ifdef CONFIG_DEBUG_INFO VDSO_CAFLAGS += -g -- cgit v1.2.3 From eff9cb67be21346402ea07d7a48564909b4f0f25 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 4 Oct 2019 14:20:06 +0100 Subject: arm64: vdso32: Rename COMPATCC to CC_COMPAT For consistency with CROSS_COMPILE_COMPAT, mechanically rename COMPATCC to CC_COMPAT so that specifying aspects of the compat vDSO toolchain in the environment isn't needlessly confusing. 
Acked-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso32/Makefile | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 60e19a3fc109..76b327f88fbb 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -8,21 +8,21 @@ ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32 include $(srctree)/lib/vdso/Makefile -# Same as cc-*option, but using COMPATCC instead of CC +# Same as cc-*option, but using CC_COMPAT instead of CC ifeq ($(CONFIG_CC_IS_CLANG), y) -COMPATCC ?= $(CC) +CC_COMPAT ?= $(CC) else -COMPATCC ?= $(CROSS_COMPILE_COMPAT)gcc +CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc endif cc32-option = $(call try-run,\ - $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) + $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) cc32-disable-warning = $(call try-run,\ - $(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) + $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) cc32-ldoption = $(call try-run,\ - $(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) + $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) cc32-as-instr = $(call try-run,\ - printf "%b\n" "$(1)" | $(COMPATCC) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) + printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) # We cannot use the global flags to compile the vDSO files, the main reason # being that the 32-bit compiler may be older than the main (64-bit) compiler @@ -32,13 +32,13 @@ cc32-as-instr = $(call try-run,\ # As a result we set our own flags here. 
# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile -VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include) +VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include) VDSO_CPPFLAGS += $(LINUXINCLUDE) # Common C and assembly flags # From top-level Makefile VDSO_CAFLAGS := $(VDSO_CPPFLAGS) -ifneq ($(shell $(COMPATCC) --version 2>&1 | head -n 1 | grep clang),) +ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),) VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) endif @@ -171,14 +171,14 @@ quiet_cmd_vdsold_and_vdso_check = LD32 $@ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) quiet_cmd_vdsold = LD32 $@ - cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \ + cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \ -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ quiet_cmd_vdsocc = CC32 $@ - cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< + cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< quiet_cmd_vdsocc_gettimeofday = CC32 $@ - cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $< + cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $< quiet_cmd_vdsoas = AS32 $@ - cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $< + cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $< quiet_cmd_vdsomunge = MUNGE $@ cmd_vdsomunge = $(obj)/$(munge) $< $@ -- cgit v1.2.3 From 93916beb70143c46bf1d2bacf814be3a124b253b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2019 16:26:21 +0100 Subject: arm64: Enable workaround for Cavium TX2 erratum 219 when running SMT It appears that the only case where we need to apply the TX2_219_TVM mitigation is when the core is in SMT mode. So let's condition the enabling on detecting a CPU whose MPIDR_EL1.Aff0 is non-zero. 
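As an illustration of the check described above, Aff0 occupies the bottom byte
of MPIDR_EL1, so the detection reduces to something like the following
standalone model (a sketch mirroring the commit's description, not the kernel
code itself):

/* Model of the SMT check: a CPU is an SMT sibling when MPIDR_EL1.Aff0
 * (bits [7:0]) is non-zero. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mpidr_indicates_smt(uint64_t mpidr)
{
	return (mpidr & 0xff) != 0;	/* MPIDR_AFFINITY_LEVEL(mpidr, 0) != 0 */
}

int main(void)
{
	printf("%d %d\n",
	       mpidr_indicates_smt(0x0000000000000100ULL),	/* thread 0 */
	       mpidr_indicates_smt(0x0000000000000101ULL));	/* thread 1 */
	return 0;
}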
Cc: Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 1e43ba5c79b7..d999ca2dd760 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -12,6 +12,7 @@ #include #include #include +#include static bool __maybe_unused is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) @@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) return (need_wa > 0); } +static const __maybe_unused struct midr_range tx2_family_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + {}, +}; + +static bool __maybe_unused +needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, + int scope) +{ + int i; + + if (!is_affected_midr_range_list(entry, scope) || + !is_hyp_mode_available()) + return false; + + for_each_possible_cpu(i) { + if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) + return true; + } + + return false; +} + #ifdef CONFIG_HARDEN_EL2_VECTORS static const struct midr_range arm64_harden_el2_vectors[] = { @@ -851,6 +876,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = has_cortex_a76_erratum_1463225, }, +#endif +#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 + { + .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", + .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, + ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), + .matches = needs_tx2_tvm_workaround, + }, #endif { } -- cgit v1.2.3 From 9405447ef79bc93101373e130f72e9e6cbf17dbb Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2019 16:22:24 +0100 Subject: arm64: Avoid Cavium TX2 erratum 219 when switching TTBR As a PRFM instruction racing against a TTBR update can have undesirable effects on TX2, NOP-out such PRFM on cores that are affected by the TX2-219 erratum. 
Cc:
Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpu_errata.c | 5 +++++
 arch/arm64/kernel/entry.S      | 2 ++
 2 files changed, 7 insertions(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index d999ca2dd760..a19bb3e4bcfb 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -884,6 +884,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
 		.matches = needs_tx2_tvm_workaround,
 	},
+	{
+		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+	},
 #endif
 	{
 	}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 84a822748c84..109894bd3194 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -1070,7 +1070,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 #else
 	ldr	x30, =vectors
 #endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
 	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
 	msr	vbar_el1, x30
 	add	x30, x30, #(1b - tramp_vectors)
 	isb
--
cgit v1.2.3

From 3e7c93bd04edfb0cae7dad1215544c9350254b8f Mon Sep 17 00:00:00 2001
From: Yunfeng Ye
Date: Sun, 29 Sep 2019 12:44:17 +0800
Subject: arm64: armv8_deprecated: Checking return value for memory allocation

The return values of kzalloc() and kcalloc() are not checked for
allocation failure, so add the missing checks.

Signed-off-by: Yunfeng Ye
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/armv8_deprecated.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 2ec09debc2bb..ca158be21f83 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -174,6 +174,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops)
 	struct insn_emulation *insn;

 	insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return;
+
 	insn->ops = ops;
 	insn->min = INSN_UNDEF;

@@ -233,6 +236,8 @@ static void __init register_insn_emulation_sysctl(void)

 	insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl), GFP_KERNEL);
+	if (!insns_sysctl)
+		return;

 	raw_spin_lock_irqsave(&insn_emulation_lock, flags);
 	list_for_each_entry(insn, &insn_emulation, node) {
--
cgit v1.2.3

From ec52c7134b1fcef0edfc56d55072fd4f261ef198 Mon Sep 17 00:00:00 2001
From: Julien Grall
Date: Mon, 14 Oct 2019 11:21:13 +0100
Subject: arm64: cpufeature: Treat ID_AA64ZFR0_EL1 as RAZ when SVE is not enabled

If CONFIG_ARM64_SVE=n then we fail to report ID_AA64ZFR0_EL1 as 0 when
read by userspace, despite this being required by the architecture.
Although this is theoretically a change in ABI, userspace will first
check for the presence of SVE via the HWCAP or the ID_AA64PFR0_EL1.SVE
field before probing the ID_AA64ZFR0_EL1 register. Given that these are
reported correctly for this configuration, we can safely tighten up the
current behaviour.

Ensure ID_AA64ZFR0_EL1 is treated as RAZ when CONFIG_ARM64_SVE=n.
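A sketch of the probing order described above (userspace, arm64): check
HWCAP_SVE first, and only then read ID_AA64ZFR0_EL1 through the kernel's MRS
emulation of the ID registers. The S3_0_C0_C4_4 system-register encoding used
here for ID_AA64ZFR0_EL1 is an assumption based on the architectural encoding,
not something shown in this patch:

/* Probe SVE via HWCAP before reading ID_AA64ZFR0_EL1 (sketch). */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

static unsigned long read_id_aa64zfr0(void)
{
	unsigned long val;

	/* Trapped and emulated by the kernel for EL0 readers. */
	__asm__("mrs %0, S3_0_C0_C4_4" : "=r" (val));
	return val;
}

int main(void)
{
	if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) {
		puts("SVE not reported, skipping ID_AA64ZFR0_EL1");
		return 0;
	}
	printf("ID_AA64ZFR0_EL1: 0x%lx\n", read_id_aa64zfr0());
	return 0;
}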
Signed-off-by: Julien Grall
Reviewed-by: Suzuki K Poulose
Reviewed-by: Mark Rutland
Reviewed-by: Dave Martin
Fixes: 06a916feca2b ("arm64: Expose SVE2 features for userspace")
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/cpufeature.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index cabebf1a7976..80f459ad0190 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -176,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };

 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
--
cgit v1.2.3

From 8c551f919a73c1dfa690a70a691be1da394145e8 Mon Sep 17 00:00:00 2001
From: Pavel Tatashin
Date: Mon, 14 Oct 2019 10:48:24 -0400
Subject: arm64: hibernate: check pgd table allocation

There is a bug in create_safe_exec_page(): the page table returned by the
allocator is not checked for allocation failure before it is dereferenced
in pgd_none(READ_ONCE(*pgdp)).

Check that the allocation was successful.
Fixes: 82869ac57b5d ("arm64: kernel: Add support for hibernate/suspend-to-disk")
Reviewed-by: James Morse
Signed-off-by: Pavel Tatashin
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/hibernate.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/kernel')

diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index e0a7fce0e01c..a96b2921d22c 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 				 gfp_t mask)
 {
 	int rc = 0;
+	pgd_t *trans_pgd;
 	pgd_t *pgdp;
 	pud_t *pudp;
 	pmd_t *pmdp;
@@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	memcpy((void *)dst, src_start, length);
 	__flush_icache_range(dst, dst + length);

-	pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+	trans_pgd = allocator(mask);
+	if (!trans_pgd) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pgdp = pgd_offset_raw(trans_pgd, dst_addr);
 	if (pgd_none(READ_ONCE(*pgdp))) {
 		pudp = allocator(mask);
 		if (!pudp) {
--
cgit v1.2.3

From 19c95f261c6558d4c2cbbfacd2d8bb6501384601 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Tue, 15 Oct 2019 18:25:44 +0100
Subject: arm64: entry.S: Do not preempt from IRQ before all cpufeatures are enabled

Preempting from IRQ-return means that the task has its PSTATE saved on
the stack, which will get restored when the task is resumed and does the
actual IRQ return.

However, enabling some CPU features requires modifying the PSTATE. This
means that, if a task was scheduled out during an IRQ-return before all
CPU features are enabled, the task might restore a PSTATE that does not
include the feature enablement changes once scheduled back in.

* Task 1:

  PAN == 0 ---|                          |---------------
              |                          |<- return from IRQ, PSTATE.PAN = 0
              | <- IRQ                   |
              +--------+ <- preempt()  +--
                                       ^
                                       |
                                       reschedule Task 1, PSTATE.PAN == 1

* Init:
  --------------------+------------------------
                      ^
                      |
                      enable_cpu_features
                      set PSTATE.PAN on all CPUs

Worse than this, since PSTATE is untouched when task switching is done, a
task missing the new bits in PSTATE might affect another task, if both do
direct calls to schedule() (outside of IRQ/exception contexts).

Fix this by preventing preemption on IRQ-return until features are
enabled on all CPUs. This way the only PSTATE values that are saved on
the stack are from synchronous exceptions. These are expected to be fatal
this early, the exception is BRK for WARN_ON(), but as this uses
do_debug_exception() which keeps IRQs masked, it shouldn't call
schedule().

Signed-off-by: Julien Thierry
[james: Replaced a really cool hack, with an even simpler static key in C.
expanded commit message with Julien's cover-letter ascii art] Signed-off-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 2 +- arch/arm64/kernel/process.c | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e304fe04b098..e1859e010c5f 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING orr x24, x24, x0 alternative_else_nop_endif cbnz x24, 1f // preempt count != 0 || NMI return path - bl preempt_schedule_irq // irq en/disable is done inside + bl arm64_preempt_schedule_irq // irq en/disable is done inside 1: #endif diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 1fb2819fc048..71f788cd2b18 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -631,3 +633,19 @@ static int __init tagged_addr_init(void) core_initcall(tagged_addr_init); #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ + +asmlinkage void __sched arm64_preempt_schedule_irq(void) +{ + lockdep_assert_irqs_disabled(); + + /* + * Preempting a task from an IRQ means we leave copies of PSTATE + * on the stack. cpufeature's enable calls may modify PSTATE, but + * resuming one of these preempted tasks would undo those changes. + * + * Only allow a task to be preempted once cpufeatures have been + * enabled. + */ + if (static_branch_likely(&arm64_const_caps_ready)) + preempt_schedule_irq(); +} -- cgit v1.2.3 From 597399d0cb91d049fcb78fb45c7694771b583bb7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Oct 2019 21:04:18 -0700 Subject: arm64: tags: Preserve tags for addresses translated via TTBR1 Sign-extending TTBR1 addresses when converting to an untagged address breaks the documented POSIX semantics for mlock() in some obscure error cases where we end up returning -EINVAL instead of -ENOMEM as a direct result of rewriting the upper address bits. Rework the untagged_addr() macro to preserve the upper address bits for TTBR1 addresses and only clear the tag bits for user addresses. This matches the behaviour of the 'clear_address_tag' assembly macro, so rename that and align the implementations at the same time so that they use the same instruction sequences for the tag manipulation. Link: https://lore.kernel.org/stable/20191014162651.GF19200@arrakis.emea.arm.com/ Reported-by: Jan Stancek Tested-by: Jan Stancek Reviewed-by: Catalin Marinas Tested-by: Catalin Marinas Reviewed-by: Vincenzo Frascino Tested-by: Vincenzo Frascino Reviewed-by: Andrey Konovalov Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e1859e010c5f..a3a63092eba9 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -604,7 +604,7 @@ el1_da: */ mrs x3, far_el1 inherit_daif pstate=x23, tmp=x2 - clear_address_tag x0, x3 + untagged_addr x0, x3 mov x2, sp // struct pt_regs bl do_mem_abort @@ -808,7 +808,7 @@ el0_da: mrs x26, far_el1 ct_user_exit_irqoff enable_daif - clear_address_tag x0, x26 + untagged_addr x0, x26 mov x1, x25 mov x2, sp bl do_mem_abort -- cgit v1.2.3
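As a worked example of the untagging rule described above, replicating bit 55
into the top byte clears the tag for user (TTBR0) addresses while leaving
kernel (TTBR1) addresses intact. A standalone model of that behaviour (a
sketch, not the kernel's untagged_addr() macro itself):

/* Model of the rework: replicate bit 55 into bits 63:56. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t untag(uint64_t addr)
{
	/* Equivalent to sign-extending the address from bit 55. */
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

int main(void)
{
	uint64_t user   = 0x5a000ffff1234000ULL;	/* tagged TTBR0 address */
	uint64_t kernel = 0xffff000012345678ULL;	/* TTBR1 address */

	/* user loses its 0x5a tag; kernel is preserved as-is. */
	printf("user:   %#" PRIx64 " -> %#" PRIx64 "\n", user, untag(user));
	printf("kernel: %#" PRIx64 " -> %#" PRIx64 "\n", kernel, untag(kernel));
	return 0;
}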