Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/alternative-asm.h  |  29
-rw-r--r--  arch/arm64/include/asm/alternative.h      |  44
-rw-r--r--  arch/arm64/include/asm/arch_timer.h       |   9
-rw-r--r--  arch/arm64/include/asm/cache.h            |   2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h       |   2
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h          |  73
-rw-r--r--  arch/arm64/include/asm/compat.h           |   7
-rw-r--r--  arch/arm64/include/asm/cpu.h              |   2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h       |  29
-rw-r--r--  arch/arm64/include/asm/cputype.h          |   5
-rw-r--r--  arch/arm64/include/asm/dmi.h              |  31
-rw-r--r--  arch/arm64/include/asm/fixmap.h           |   8
-rw-r--r--  arch/arm64/include/asm/hwcap.h            |   1
-rw-r--r--  arch/arm64/include/asm/insn.h             |  10
-rw-r--r--  arch/arm64/include/asm/io.h               | 145
-rw-r--r--  arch/arm64/include/asm/irq.h              |   3
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h          |  21
-rw-r--r--  arch/arm64/include/asm/memory.h           |   2
-rw-r--r--  arch/arm64/include/asm/opcodes.h          |   1
-rw-r--r--  arch/arm64/include/asm/percpu.h           | 215
-rw-r--r--  arch/arm64/include/asm/pgalloc.h          |   8
-rw-r--r--  arch/arm64/include/asm/seccomp.h          |  25
-rw-r--r--  arch/arm64/include/asm/tlb.h              |  67
-rw-r--r--  arch/arm64/include/asm/traps.h            |  16
-rw-r--r--  arch/arm64/include/asm/unistd.h           |   3
-rw-r--r--  arch/arm64/include/asm/unistd32.h         |   3
26 files changed, 576 insertions(+), 185 deletions(-)
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..919a67855b63
--- /dev/null
+++ b/arch/arm64/include/asm/alternative-asm.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ALTERNATIVE_ASM_H
+#define __ASM_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+ .word \orig_offset - .
+ .word \alt_offset - .
+ .hword \feature
+ .byte \orig_len
+ .byte \alt_len
+.endm
+
+.macro alternative_insn insn1 insn2 cap
+661: \insn1
+662: .pushsection .altinstructions, "a"
+ altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+ .popsection
+ .pushsection .altinstr_replacement, "ax"
+663: \insn2
+664: .popsection
+ .if ((664b-663b) != (662b-661b))
+ .error "Alternatives instruction length mismatch"
+ .endif
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
new file mode 100644
index 000000000000..d261f01e2bae
--- /dev/null
+++ b/arch/arm64/include/asm/alternative.h
@@ -0,0 +1,44 @@
+#ifndef __ASM_ALTERNATIVE_H
+#define __ASM_ALTERNATIVE_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 orig_offset; /* offset to original instruction */
+ s32 alt_offset; /* offset to replacement instruction */
+ u16 cpufeature; /* cpufeature bit set for replacement */
+ u8 orig_len; /* size of original instruction(s) */
+ u8 alt_len; /* size of new instruction(s), <= orig_len */
+};
+
+void apply_alternatives_all(void);
+void apply_alternatives(void *start, size_t length);
+void free_alternatives_memory(void);
+
+#define ALTINSTR_ENTRY(feature) \
+ " .word 661b - .\n" /* label */ \
+ " .word 663f - .\n" /* new instruction */ \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, newinstr, feature) \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature) \
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" \
+ newinstr "\n" \
+ "664:\n\t" \
+ ".popsection\n\t" \
+ ".if ((664b-663b) != (662b-661b))\n\t" \
+ " .error \"Alternatives instruction length mismatch\"\n\t"\
+ ".endif\n"
+
+#endif /* __ASM_ALTERNATIVE_H */
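
For context, a minimal sketch (not part of this patch) of how ALTERNATIVE() is consumed from C: the default instruction is emitted inline, the replacement goes into .altinstr_replacement, and apply_alternatives() patches it in at boot when the named capability bit is set. This mirrors the io.h hunks further down; the function name here is illustrative.

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/* Illustrative only: patch a plain load into a load-acquire when the
 * ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE capability is set at boot. */
static inline u32 example_readl(const volatile void __iomem *addr)
{
	u32 val;

	asm volatile(ALTERNATIVE("ldr %w0, [%1]",	/* default instruction */
				 "ldar %w0, [%1]",	/* replacement if cap set */
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	return val;
}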
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index f19097134b02..b1fa4e614718 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -104,6 +104,15 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
+static inline u64 arch_counter_get_cntpct(void)
+{
+ /*
+ * AArch64 kernel and user space mandate the use of CNTVCT.
+ */
+ BUG();
+ return 0;
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 88cc05b5f3ac..bde449936e2f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -32,6 +32,8 @@
#ifndef __ASSEMBLY__
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
static inline int cache_line_size(void)
{
u32 cwg = cache_type_cwg();
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 689b6379188c..7ae31a2cc6c0 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -73,7 +73,7 @@ extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
-extern void __flush_cache_user_range(unsigned long start, unsigned long end);
+extern long __flush_cache_user_range(unsigned long start, unsigned long end);
static inline void flush_cache_mm(struct mm_struct *mm)
{
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index ddb9d7830558..cb9593079f29 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -19,6 +19,7 @@
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
+#include <linux/mmdebug.h>
#include <asm/barrier.h>
@@ -152,6 +153,51 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
+#define system_has_cmpxchg_double() 1
+
+static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
+ unsigned long old1, unsigned long old2,
+ unsigned long new1, unsigned long new2, int size)
+{
+ unsigned long loop, lost;
+
+ switch (size) {
+ case 8:
+ VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
+ do {
+ asm volatile("// __cmpxchg_double8\n"
+ " ldxp %0, %1, %2\n"
+ " eor %0, %0, %3\n"
+ " eor %1, %1, %4\n"
+ " orr %1, %0, %1\n"
+ " mov %w0, #0\n"
+ " cbnz %1, 1f\n"
+ " stxp %w0, %5, %6, %2\n"
+ "1:\n"
+ : "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
+ : "r" (old1), "r"(old2), "r"(new1), "r"(new2));
+ } while (loop);
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return !lost;
+}
+
+static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
+ unsigned long old1, unsigned long old2,
+ unsigned long new1, unsigned long new2, int size)
+{
+ int ret;
+
+ smp_mb();
+ ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
+ smp_mb();
+
+ return ret;
+}
+
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
@@ -182,6 +228,33 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
__ret; \
})
+#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({\
+ int __ret;\
+ __ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
+ (unsigned long)(o2), (unsigned long)(n1), \
+ (unsigned long)(n2), sizeof(*(ptr1)));\
+ __ret; \
+})
+
+#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({\
+ int __ret;\
+ __ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
+ (unsigned long)(o2), (unsigned long)(n1), \
+ (unsigned long)(n2), sizeof(*(ptr1)));\
+ __ret; \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+ cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
+ o1, o2, n1, n2)
+
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
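
A usage sketch (illustrative, not from this patch): cmpxchg_double() atomically compares and swaps two adjacent 64-bit words, as the VM_BUG_ON() above enforces, and returns non-zero when both old values matched and the new pair was stored. The structure and helper names below are assumptions.

#include <asm/cmpxchg.h>

/* Illustrative: the two fields must be adjacent 64-bit words. */
struct example_pair {
	unsigned long first;
	unsigned long second;
};

static int example_update_pair(struct example_pair *p,
			       unsigned long old1, unsigned long old2,
			       unsigned long new1, unsigned long new2)
{
	/* Returns 1 if {first, second} == {old1, old2} and the pair was
	 * replaced with {new1, new2}; 0 otherwise. Full barriers come
	 * from the __cmpxchg_double_mb() variant underneath. */
	return cmpxchg_double(&p->first, &p->second, old1, old2, new1, new2);
}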
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 56de5aadede2..3fb053fa6e98 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -205,6 +205,13 @@ typedef struct compat_siginfo {
compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
+
+ /* SIGSYS */
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
} _sifields;
} compat_siginfo_t;
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 056443086019..ace70682499b 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -30,6 +30,8 @@ struct cpuinfo_arm64 {
u32 reg_dczid;
u32 reg_midr;
+ u64 reg_id_aa64dfr0;
+ u64 reg_id_aa64dfr1;
u64 reg_id_aa64isar0;
u64 reg_id_aa64isar1;
u64 reg_id_aa64mmfr0;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index cd4ac0516488..07547ccc1f2b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -21,9 +21,38 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
+#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+
+#define ARM64_NCAPS 2
+
+#ifndef __ASSEMBLY__
+
+extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+
static inline bool cpu_have_feature(unsigned int num)
{
return elf_hwcap & (1UL << num);
}
+static inline bool cpus_have_cap(unsigned int num)
+{
+ if (num >= ARM64_NCAPS)
+ return false;
+ return test_bit(num, cpu_hwcaps);
+}
+
+static inline void cpus_set_cap(unsigned int num)
+{
+ if (num >= ARM64_NCAPS)
+ pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
+ num, ARM64_NCAPS);
+ else
+ __set_bit(num, cpu_hwcaps);
+}
+
+void check_local_cpu_errata(void);
+
+#endif /* __ASSEMBLY__ */
+
#endif
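
A usage sketch (illustrative): capability bits are set at boot with cpus_set_cap(), e.g. by the errata checks behind check_local_cpu_errata(), and queried later with cpus_have_cap(). The helper name below is made up.

#include <asm/cpufeature.h>

/* Illustrative helper; real callers test the bit directly where needed. */
static bool example_needs_clean_cache_workaround(void)
{
	return cpus_have_cap(ARM64_WORKAROUND_CLEAN_CACHE);
}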
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 379d0b874328..8adb986a3086 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -57,6 +57,11 @@
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_CPU_PART(imp, partnum) \
+ (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+ (0xf << MIDR_ARCHITECTURE_SHIFT) | \
+ ((partnum) << MIDR_PARTNUM_SHIFT))
+
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
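
MIDR_CPU_PART() composes an implementer and part number into a MIDR match value, with the architecture field fixed to 0xf. A hedged example, assuming 0xd03 as the Cortex-A53 part number (not defined by this patch):

/* Illustrative: a MIDR value for matching Cortex-A53 in errata tables. */
#define MIDR_EXAMPLE_CORTEX_A53	MIDR_CPU_PART(ARM_CPU_IMP_ARM, 0xd03)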
diff --git a/arch/arm64/include/asm/dmi.h b/arch/arm64/include/asm/dmi.h
new file mode 100644
index 000000000000..69d37d87b159
--- /dev/null
+++ b/arch/arm64/include/asm/dmi.h
@@ -0,0 +1,31 @@
+/*
+ * arch/arm64/include/asm/dmi.h
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ * Written by: Yi Li (yi.li@linaro.org)
+ *
+ * based on arch/ia64/include/asm/dmi.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __ASM_DMI_H
+#define __ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+/*
+ * According to section 2.3.6 of the UEFI spec, the firmware should not
+ * request a virtual mapping for configuration tables such as SMBIOS.
+ * This means we have to map them before use.
+ */
+#define dmi_early_remap(x, l) ioremap_cache(x, l)
+#define dmi_early_unmap(x, l) iounmap(x)
+#define dmi_remap(x, l) ioremap_cache(x, l)
+#define dmi_unmap(x) iounmap(x)
+#define dmi_alloc(l) kzalloc(l, GFP_KERNEL)
+
+#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 5f7bfe6df723..9ef6eca905ca 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -31,6 +31,7 @@
*
*/
enum fixed_addresses {
+ FIX_HOLE,
FIX_EARLYCON_MEM_BASE,
__end_of_permanent_fixed_addresses,
@@ -56,10 +57,11 @@ enum fixed_addresses {
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
-extern void __early_set_fixmap(enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags);
+void __init early_fixmap_init(void);
-#define __set_fixmap __early_set_fixmap
+#define __early_set_fixmap __set_fixmap
+
+extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
#include <asm-generic/fixmap.h>
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 024c46183c3c..0ad735166d9f 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_LPAE (1 << 20)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#define COMPAT_HWCAP2_AES (1 << 0)
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 56a9e63b6c33..e2ff32a93b5c 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -354,6 +354,16 @@ bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+bool aarch32_insn_is_wide(u32 insn);
+
+#define A32_RN_OFFSET 16
+#define A32_RT_OFFSET 12
+#define A32_RT2_OFFSET 0
+
+u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
+u32 aarch32_insn_mcr_extract_opc2(u32 insn);
+u32 aarch32_insn_mcr_extract_crm(u32 insn);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 79f1d519221f..949c406d4df4 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -28,57 +28,80 @@
#include <asm/barrier.h>
#include <asm/pgtable.h>
#include <asm/early_ioremap.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <xen/xen.h>
/*
* Generic IO read/write. These perform native-endian accesses.
*/
+#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
}
+#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
}
+#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
}
+#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
}
+#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
- asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
+ "ldarb %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
+#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
u16 val;
- asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
+
+ asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
+ "ldarh %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
+#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
- asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldr %w0, [%1]",
+ "ldar %w0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
+#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
u64 val;
- asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
+ asm volatile(ALTERNATIVE("ldr %0, [%1]",
+ "ldar %0, [%1]",
+ ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+ : "=r" (val) : "r" (addr));
return val;
}
@@ -125,94 +148,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define IO_SPACE_LIMIT (SZ_32M - 1)
#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
-static inline u8 inb(unsigned long addr)
-{
- return readb(addr + PCI_IOBASE);
-}
-
-static inline u16 inw(unsigned long addr)
-{
- return readw(addr + PCI_IOBASE);
-}
-
-static inline u32 inl(unsigned long addr)
-{
- return readl(addr + PCI_IOBASE);
-}
-
-static inline void outb(u8 b, unsigned long addr)
-{
- writeb(b, addr + PCI_IOBASE);
-}
-
-static inline void outw(u16 b, unsigned long addr)
-{
- writew(b, addr + PCI_IOBASE);
-}
-
-static inline void outl(u32 b, unsigned long addr)
-{
- writel(b, addr + PCI_IOBASE);
-}
-
-#define inb_p(addr) inb(addr)
-#define inw_p(addr) inw(addr)
-#define inl_p(addr) inl(addr)
-
-#define outb_p(x, addr) outb((x), (addr))
-#define outw_p(x, addr) outw((x), (addr))
-#define outl_p(x, addr) outl((x), (addr))
-
-static inline void insb(unsigned long addr, void *buffer, int count)
-{
- u8 *buf = buffer;
- while (count--)
- *buf++ = __raw_readb(addr + PCI_IOBASE);
-}
-
-static inline void insw(unsigned long addr, void *buffer, int count)
-{
- u16 *buf = buffer;
- while (count--)
- *buf++ = __raw_readw(addr + PCI_IOBASE);
-}
-
-static inline void insl(unsigned long addr, void *buffer, int count)
-{
- u32 *buf = buffer;
- while (count--)
- *buf++ = __raw_readl(addr + PCI_IOBASE);
-}
-
-static inline void outsb(unsigned long addr, const void *buffer, int count)
-{
- const u8 *buf = buffer;
- while (count--)
- __raw_writeb(*buf++, addr + PCI_IOBASE);
-}
-
-static inline void outsw(unsigned long addr, const void *buffer, int count)
-{
- const u16 *buf = buffer;
- while (count--)
- __raw_writew(*buf++, addr + PCI_IOBASE);
-}
-
-static inline void outsl(unsigned long addr, const void *buffer, int count)
-{
- const u32 *buf = buffer;
- while (count--)
- __raw_writel(*buf++, addr + PCI_IOBASE);
-}
-
-#define insb_p(port,to,len) insb(port,to,len)
-#define insw_p(port,to,len) insw(port,to,len)
-#define insl_p(port,to,len) insl(port,to,len)
-
-#define outsb_p(port,from,len) outsb(port,from,len)
-#define outsw_p(port,from,len) outsw(port,from,len)
-#define outsl_p(port,from,len) outsl(port,from,len)
-
/*
* String version of I/O memory access operations.
*/
@@ -236,18 +171,14 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
-#define ARCH_HAS_IOREMAP_WC
-#include <asm-generic/iomap.h>
-
/*
- * More restrictive address range checking than the default implementation
- * (PHYS_OFFSET and PHYS_MASK taken into account).
+ * io{read,write}{16,32}be() macros
*/
-#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
-extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-extern int devmem_is_allowed(unsigned long pfn);
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
@@ -260,6 +191,18 @@ extern int devmem_is_allowed(unsigned long pfn);
*/
#define xlate_dev_kmem_ptr(p) p
+#include <asm-generic/io.h>
+
+/*
+ * More restrictive address range checking than the default implementation
+ * (PHYS_OFFSET and PHYS_MASK taken into account).
+ */
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
+extern int devmem_is_allowed(unsigned long pfn);
+
struct bio_vec;
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
const struct bio_vec *vec2);
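
A small sketch (illustrative) of the new big-endian MMIO accessors: ioread32be() byte-swaps after the raw read and adds the read barrier, iowrite32be() does the reverse, so drivers for devices with big-endian registers can use them directly. The register offsets and function names below are made up.

static u32 example_read_be_status(void __iomem *base)
{
	return ioread32be(base + 0x10);		/* device register is big-endian */
}

static void example_write_be_ctrl(void __iomem *base, u32 val)
{
	iowrite32be(val, base + 0x14);
}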
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index e1f7ecdde11f..94c53674a31d 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -3,7 +3,8 @@
#include <asm-generic/irq.h>
-extern void (*handle_arch_irq)(struct pt_regs *);
+struct pt_regs;
+
extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7fd3e27e3ccc..8afb863f5a9e 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__
+#include <asm/memory.h>
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
@@ -160,9 +161,9 @@
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT (48LLU)
-#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT (UL(48))
+#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
#define HSTR_EL2_TTEE (1 << 16)
@@ -185,13 +186,13 @@
/* Exception Syndrome Register (ESR) bits */
#define ESR_EL2_EC_SHIFT (26)
-#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL (1U << 25)
+#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
+#define ESR_EL2_IL (UL(1) << 25)
#define ESR_EL2_ISS (ESR_EL2_IL - 1)
#define ESR_EL2_ISV_SHIFT (24)
-#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
+#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
#define ESR_EL2_SAS_SHIFT (22)
-#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
+#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
#define ESR_EL2_SSE (1 << 21)
#define ESR_EL2_SRT_SHIFT (16)
#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
@@ -205,16 +206,16 @@
#define ESR_EL2_FSC_TYPE (0x3c)
#define ESR_EL2_CV_SHIFT (24)
-#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
+#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
#define ESR_EL2_COND_SHIFT (20)
-#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
+#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_PERM (0x0c)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xFUL)
+#define HPFAR_MASK (~UL(0xf))
#define ESR_EL2_EC_UNKNOWN (0x00)
#define ESR_EL2_EC_WFI (0x01)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a62cd077457b..6486b2bfd562 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -120,11 +120,13 @@ extern phys_addr_t memstart_addr;
* translation for translating DMA addresses. Use the driver
* DMA support - see dma-mapping.h.
*/
+#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
return __virt_to_phys((unsigned long)(x));
}
+#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
return (void *)(__phys_to_virt(x));
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
new file mode 100644
index 000000000000..4e603ea36ad3
--- /dev/null
+++ b/arch/arm64/include/asm/opcodes.h
@@ -0,0 +1 @@
+#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 5279e5733386..09da25bc596f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,6 +44,221 @@ static inline unsigned long __my_cpu_offset(void)
#endif /* CONFIG_SMP */
+#define PERCPU_OP(op, asm_op) \
+static inline unsigned long __percpu_##op(void *ptr, \
+ unsigned long val, int size) \
+{ \
+ unsigned long loop, ret; \
+ \
+ switch (size) { \
+ case 1: \
+ do { \
+ asm ("//__per_cpu_" #op "_1\n" \
+ "ldxrb %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+ "stxrb %w[loop], %w[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u8 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ case 2: \
+ do { \
+ asm ("//__per_cpu_" #op "_2\n" \
+ "ldxrh %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+ "stxrh %w[loop], %w[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u16 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ case 4: \
+ do { \
+ asm ("//__per_cpu_" #op "_4\n" \
+ "ldxr %w[ret], %[ptr]\n" \
+ #asm_op " %w[ret], %w[ret], %w[val]\n" \
+ "stxr %w[loop], %w[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u32 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ case 8: \
+ do { \
+ asm ("//__per_cpu_" #op "_8\n" \
+ "ldxr %[ret], %[ptr]\n" \
+ #asm_op " %[ret], %[ret], %[val]\n" \
+ "stxr %w[loop], %[ret], %[ptr]\n" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u64 *)ptr) \
+ : [val] "Ir" (val)); \
+ } while (loop); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ \
+ return ret; \
+}
+
+PERCPU_OP(add, add)
+PERCPU_OP(and, and)
+PERCPU_OP(or, orr)
+#undef PERCPU_OP
+
+static inline unsigned long __percpu_read(void *ptr, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+ case 1:
+ ret = ACCESS_ONCE(*(u8 *)ptr);
+ break;
+ case 2:
+ ret = ACCESS_ONCE(*(u16 *)ptr);
+ break;
+ case 4:
+ ret = ACCESS_ONCE(*(u32 *)ptr);
+ break;
+ case 8:
+ ret = ACCESS_ONCE(*(u64 *)ptr);
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return ret;
+}
+
+static inline void __percpu_write(void *ptr, unsigned long val, int size)
+{
+ switch (size) {
+ case 1:
+ ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
+ break;
+ case 2:
+ ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
+ break;
+ case 4:
+ ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
+ break;
+ case 8:
+ ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ int size)
+{
+ unsigned long ret, loop;
+
+ switch (size) {
+ case 1:
+ do {
+ asm ("//__percpu_xchg_1\n"
+ "ldxrb %w[ret], %[ptr]\n"
+ "stxrb %w[loop], %w[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u8 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ case 2:
+ do {
+ asm ("//__percpu_xchg_2\n"
+ "ldxrh %w[ret], %[ptr]\n"
+ "stxrh %w[loop], %w[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u16 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ case 4:
+ do {
+ asm ("//__percpu_xchg_4\n"
+ "ldxr %w[ret], %[ptr]\n"
+ "stxr %w[loop], %w[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u32 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ case 8:
+ do {
+ asm ("//__percpu_xchg_8\n"
+ "ldxr %[ret], %[ptr]\n"
+ "stxr %w[loop], %[val], %[ptr]\n"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u64 *)ptr)
+ : [val] "r" (val));
+ } while (loop);
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ return ret;
+}
+
+#define _percpu_add(pcp, val) \
+ __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+
+#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+
+#define _percpu_and(pcp, val) \
+ __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+
+#define _percpu_or(pcp, val) \
+ __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+
+#define _percpu_read(pcp) (typeof(pcp)) \
+ (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
+
+#define _percpu_write(pcp, val) \
+ __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+
+#define _percpu_xchg(pcp, val) (typeof(pcp)) \
+ (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+
+#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
+#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
+
+#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
+#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
+
+#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
+#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
+
+#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
+#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
+
+#define this_cpu_read_1(pcp) _percpu_read(pcp)
+#define this_cpu_read_2(pcp) _percpu_read(pcp)
+#define this_cpu_read_4(pcp) _percpu_read(pcp)
+#define this_cpu_read_8(pcp) _percpu_read(pcp)
+
+#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
+#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
+
+#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */
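
A usage sketch (illustrative): the __percpu_*() fast paths above back the generic this_cpu_*() operations, so a per-CPU counter can be updated with a single exclusive-load/store sequence instead of disabling preemption around a read-modify-write. The variable and function names are made up.

#include <linux/percpu.h>

/* Illustrative per-CPU counter. */
static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void)
{
	/* Resolves to this_cpu_add_8() -> _percpu_add() -> __percpu_add(). */
	this_cpu_add(example_events, 1);
}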
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index d5bed02073d6..e20df38a8ff3 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,11 +26,13 @@
#define check_pgt_cache() do { } while (0)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ return (pmd_t *)__get_free_page(PGALLOC_GFP);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -50,7 +52,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ return (pud_t *)__get_free_page(PGALLOC_GFP);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -69,8 +71,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
-
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
new file mode 100644
index 000000000000..c76fac979629
--- /dev/null
+++ b/arch/arm64/include/asm/seccomp.h
@@ -0,0 +1,25 @@
+/*
+ * arch/arm64/include/asm/seccomp.h
+ *
+ * Copyright (C) 2014 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#ifdef CONFIG_COMPAT
+#define __NR_seccomp_read_32 __NR_compat_read
+#define __NR_seccomp_write_32 __NR_compat_write
+#define __NR_seccomp_exit_32 __NR_compat_exit
+#define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn
+#endif /* CONFIG_COMPAT */
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a82c0c5c8b52..c028fe37456f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -19,10 +19,6 @@
#ifndef __ASM_TLB_H
#define __ASM_TLB_H
-#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry
-
-#include <asm-generic/tlb.h>
-
#include <linux/pagemap.h>
#include <linux/swap.h>
@@ -37,71 +33,22 @@ static inline void __tlb_remove_table(void *_table)
#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-/*
- * There's three ways the TLB shootdown code is used:
- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- * 2. Unmapping all vmas. See exit_mmap().
- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- * Page tables will be freed.
- * 3. Unmapping argument pages. See shift_arg_pages().
- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- */
+#include <asm-generic/tlb.h>
+
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm) {
flush_tlb_mm(tlb->mm);
- } else if (tlb->end > 0) {
+ } else {
struct vm_area_struct vma = { .vm_mm = tlb->mm, };
flush_tlb_range(&vma, tlb->start, tlb->end);
- tlb->start = TASK_SIZE;
- tlb->end = 0;
- }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
- if (!tlb->fullmm) {
- tlb->start = min(tlb->start, addr);
- tlb->end = max(tlb->end, addr + PAGE_SIZE);
- }
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void tlb_start_vma(struct mmu_gather *tlb,
- struct vm_area_struct *vma)
-{
- if (!tlb->fullmm) {
- tlb->start = TASK_SIZE;
- tlb->end = 0;
}
}
-static inline void tlb_end_vma(struct mmu_gather *tlb,
- struct vm_area_struct *vma)
-{
- if (!tlb->fullmm)
- tlb_flush(tlb);
-}
-
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
pgtable_page_dtor(pte);
- tlb_add_flush(tlb, addr);
tlb_remove_entry(tlb, pte);
}
@@ -109,7 +56,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
- tlb_add_flush(tlb, addr);
tlb_remove_entry(tlb, virt_to_page(pmdp));
}
#endif
@@ -118,15 +64,8 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
- tlb_add_flush(tlb, addr);
tlb_remove_entry(tlb, virt_to_page(pudp));
}
#endif
-static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
- unsigned long address)
-{
- tlb_add_flush(tlb, address);
-}
-
#endif
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 10ca8ff93cc2..232e4ba5d314 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -18,6 +18,22 @@
#ifndef __ASM_TRAP_H
#define __ASM_TRAP_H
+#include <linux/list.h>
+
+struct pt_regs;
+
+struct undef_hook {
+ struct list_head node;
+ u32 instr_mask;
+ u32 instr_val;
+ u64 pstate_mask;
+ u64 pstate_val;
+ int (*fn)(struct pt_regs *regs, u32 instr);
+};
+
+void register_undef_hook(struct undef_hook *hook);
+void unregister_undef_hook(struct undef_hook *hook);
+
static inline int in_exception_text(unsigned long ptr)
{
extern char __exception_text_start[];
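
A registration sketch (illustrative, not from this patch): an undef hook matches an instruction pattern and PSTATE condition and gets a chance to handle the instruction before the undefined-instruction handler raises SIGILL. The mask, value and handler below are assumptions.

#include <asm/ptrace.h>
#include <asm/traps.h>

/* Illustrative hook: returning 0 from the handler means the instruction
 * was handled; the handler advances the PC itself. */
static int example_undef_handler(struct pt_regs *regs, u32 instr)
{
	regs->pc += 4;
	return 0;
}

static struct undef_hook example_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xe7f000f0,	/* assumed encoding, for illustration */
	.pstate_mask	= 0,		/* match any PSTATE */
	.pstate_val	= 0,
	.fn		= example_undef_handler,
};

static void example_register(void)
{
	register_undef_hook(&example_hook);
}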
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 6d2bf419431d..49c9aefd24a5 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -31,6 +31,9 @@
* Compat syscall numbers used by the AArch64 kernel.
*/
#define __NR_compat_restart_syscall 0
+#define __NR_compat_exit 1
+#define __NR_compat_read 3
+#define __NR_compat_write 4
#define __NR_compat_sigreturn 119
#define __NR_compat_rt_sigreturn 173
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 9dfdac4a74a1..8893cebcea5b 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -787,7 +787,8 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr)
__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
#define __NR_renameat2 382
__SYSCALL(__NR_renameat2, sys_renameat2)
- /* 383 for seccomp */
+#define __NR_seccomp 383
+__SYSCALL(__NR_seccomp, sys_seccomp)
#define __NR_getrandom 384
__SYSCALL(__NR_getrandom, sys_getrandom)
#define __NR_memfd_create 385