path: root/arch/arm64/include/asm/lse.h
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 07:18:09 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 07:18:09 -0700
commit    a4fdb2a46f617b8b2cd47acec026ec16532edbc6
tree      8d993287c9337349034ce6bbe050f7ce016a5268  /arch/arm64/include/asm/lse.h
parent    807249d3ada1ff28a47c4054ca4edd479421b671
parent    674c242c9323d3c293fc4f9a3a3a619fe3063290
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:

 - Support for new architectural features introduced in ARMv8.1:
     * Privileged Access Never (PAN) to catch user pointer dereferences
       in the kernel
     * Large System Extension (LSE) for building scalable atomics and
       locks (depends on locking/arch-atomic from tip, which is
       included here)
     * Hardware Dirty Bit Management (DBM) for updating clean PTEs
       automatically

 - Move our PSCI implementation out into drivers/firmware/, where it
   can be shared with arch/arm/.  RMK has also pulled this component
   branch and has additional patches moving arch/arm/ over.
   MAINTAINERS is updated accordingly.

 - Better BUG implementation based on the BRK instruction for trapping

 - Leaf TLB invalidation for unmapping user pages

 - Support for PROBE_ONLY PCI configurations

 - Various cleanups and non-critical fixes, including:
     * Always flush FP/SIMD state over exec()
     * Restrict memblock additions based on range of linear mapping
     * Ensure *(LIST_POISON) generates a fatal fault
     * Context-tracking syscall return no longer corrupts return value
       when not forced on
     * Alternatives patching synchronisation/stability improvements
     * Signed sub-word cmpxchg compare fix (tickled by
       HAVE_CMPXCHG_LOCAL)
     * Force SMP=y
     * Hide direct DCC access from userspace
     * Fix EFI stub memory allocation when DRAM starts at 0x0

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
  arm64: flush FP/SIMD state correctly after execve()
  arm64: makefile: fix perf_callchain.o kconfig dependency
  arm64: set MAX_MEMBLOCK_ADDR according to linear region size
  of/fdt: make memblock maximum physical address arch configurable
  arm64: Fix source code file path in comments
  arm64: entry: always restore x0 from the stack on syscall return
  arm64: mdscr_el1: avoid exposing DCC to userspace
  arm64: kconfig: Move LIST_POISON to a safe value
  arm64: Add __exception_irq_entry definition for function graph
  arm64: mm: ensure patched kernel text is fetched from PoU
  arm64: alternatives: ensure secondary CPUs execute ISB after patching
  arm64: make ll/sc __cmpxchg_case_##name asm consistent
  arm64: dma-mapping: Simplify pgprot handling
  arm64: restore cpu suspend/resume functionality
  ARM64: PCI: do not enable resources on PROBE_ONLY systems
  arm64: cmpxchg: truncate sub-word signed types before comparison
  arm64: alternative: put secondary CPUs into polling loop during patch
  arm64/Documentation: clarify wording regarding memory below the Image
  arm64: lse: fix lse cmpxchg code indentation
  arm64: remove redundant object file list
  ...
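The PAN and alternatives items above rely on the same boot-time patching
machinery that lse.h uses below: the kernel emits a default instruction
plus a replacement in a special section, and rewrites the default once
the CPU's features are known.  A minimal sketch of the PAN pattern,
assuming the ALTERNATIVE(), SET_PSTATE_PAN() and ARM64_HAS_PAN
definitions added by this series (the function name is illustrative,
not code from this merge):

	#include <asm/alternative.h>
	#include <asm/cpufeature.h>
	#include <asm/sysreg.h>

	static inline void pan_enable_sketch(void)
	{
		/*
		 * Emits a NOP at compile time; the boot-time alternatives
		 * pass replaces it with a "set PSTATE.PAN" instruction on
		 * CPUs that advertise ARM64_HAS_PAN, locking the kernel
		 * out of user mappings outside the uaccess helpers.
		 */
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
				CONFIG_ARM64_PAN));
	}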
Diffstat (limited to 'arch/arm64/include/asm/lse.h')
-rw-r--r--  arch/arm64/include/asm/lse.h  53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
new file mode 100644
index 000000000000..3de42d68611d
--- /dev/null
+++ b/arch/arm64/include/asm/lse.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_LSE_H
+#define __ASM_LSE_H
+
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+
+#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
+#ifdef __ASSEMBLY__
+
+.arch_extension lse
+
+.macro alt_lse, llsc, lse
+ alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
+.endm
+
+#else /* __ASSEMBLY__ */
+
+__asm__(".arch_extension lse");
+
+/* Move the ll/sc atomics out-of-line */
+#define __LL_SC_INLINE
+#define __LL_SC_PREFIX(x) __ll_sc_##x
+#define __LL_SC_EXPORT(x) EXPORT_SYMBOL(__LL_SC_PREFIX(x))
+
+/* Macro for constructing calls to out-of-line ll/sc atomics */
+#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+
+/* In-line patching at runtime */
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
+ ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+
+#ifdef __ASSEMBLY__
+
+.macro alt_lse, llsc, lse
+ \llsc
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#define __LL_SC_INLINE static inline
+#define __LL_SC_PREFIX(x) x
+#define __LL_SC_EXPORT(x)
+
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* __ASM_LSE_H */
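For context, this is how the macros above are consumed: a minimal
sketch modeled on arch/arm64/include/asm/atomic_lse.h from the same
series (the function name here is illustrative).  The out-of-line
LL/SC helpers take their operands in w0/x1, so the wrapper pins its
arguments to those registers, and x30 (the link register) is clobbered
by the "bl" that __LL_SC_CALL emits:

	static inline void example_atomic_add(int i, atomic_t *v)
	{
		register int w0 asm ("w0") = i;
		register atomic_t *x1 asm ("x1") = v;

		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC form: branch to out-of-line __ll_sc_atomic_add */
		__LL_SC_CALL(atomic_add),
		/* LSE form: a single STADD instruction, patched in at boot */
		"	stadd	%w[i], %[v]\n")
		: [i] "+r" (w0), [v] "+Q" (v->counter)
		: "r" (x1)
		: "x30");
	}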