Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/5level-fixup.h          |   1
-rw-r--r--   include/asm-generic/atomic-instrumented.h   | 476
-rw-r--r--   include/asm-generic/atomic.h                |   2
-rw-r--r--   include/asm-generic/barrier.h               |   2
-rw-r--r--   include/asm-generic/exec.h                  |   2
-rw-r--r--   include/asm-generic/io.h                    | 185
-rw-r--r--   include/asm-generic/kvm_para.h              |   5
-rw-r--r--   include/asm-generic/pci_iomap.h             |   2
-rw-r--r--   include/asm-generic/pgtable-nop4d.h         |   9
-rw-r--r--   include/asm-generic/pgtable.h               |  46
-rw-r--r--   include/asm-generic/switch_to.h             |   2
-rw-r--r--   include/asm-generic/vmlinux.lds.h           |  11
12 files changed, 703 insertions(+), 40 deletions(-)
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index dfbd9d990637..9c2e0708eb82 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -8,6 +8,7 @@
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
#define P4D_MASK PGDIR_MASK
+#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define p4d_t pgd_t
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
new file mode 100644
index 000000000000..ec07f23678ea
--- /dev/null
+++ b/include/asm-generic/atomic-instrumented.h
@@ -0,0 +1,476 @@
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality, an arch's atomic.h file needs to define all
+ * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file then provides atomic_read(), which forwards
+ * to arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
+
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/kasan-checks.h>
+
+static __always_inline int atomic_read(const atomic_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic_read(v);
+}
+
+static __always_inline s64 atomic64_read(const atomic64_t *v)
+{
+ kasan_check_read(v, sizeof(*v));
+ return arch_atomic64_read(v);
+}
+
+static __always_inline void atomic_set(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline int atomic_xchg(atomic_t *v, int i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+#ifdef arch_atomic_try_cmpxchg
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg(v, old, new);
+}
+#endif
+
+#ifdef arch_atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kasan_check_write(v, sizeof(*v));
+ kasan_check_read(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg(v, old, new);
+}
+#endif
+
+static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return __arch_atomic_add_unless(v, a, u);
+}
+
+
+static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline void atomic_inc(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_inc(v);
+}
+
+static __always_inline void atomic64_inc(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_inc(v);
+}
+
+static __always_inline void atomic_dec(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_dec(v);
+}
+
+static __always_inline void atomic64_dec(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_dec(v);
+}
+
+static __always_inline void atomic_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_add(i, v);
+}
+
+static __always_inline void atomic64_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline void atomic_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline void atomic_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_and(i, v);
+}
+
+static __always_inline void atomic64_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline void atomic_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_or(i, v);
+}
+
+static __always_inline void atomic64_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline void atomic_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ arch_atomic64_xor(i, v);
+}
+
+static __always_inline int atomic_inc_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_return(v);
+}
+
+static __always_inline s64 atomic64_inc_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_return(v);
+}
+
+static __always_inline int atomic_dec_return(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_return(v);
+}
+
+static __always_inline s64 atomic64_dec_return(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_return(v);
+}
+
+static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_if_positive(v);
+}
+
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline int atomic_add_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline int atomic_sub_return(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline int atomic_fetch_or(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kasan_check_write(v, sizeof(*v));
+ return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline unsigned long
+cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define cmpxchg(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
+ (unsigned long)(new), sizeof(*(ptr)))); \
+})
+
+static __always_inline unsigned long
+sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define sync_cmpxchg(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
+ (unsigned long)(old), (unsigned long)(new), \
+ sizeof(*(ptr)))); \
+})
+
+static __always_inline unsigned long
+cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ kasan_check_write(ptr, size);
+ switch (size) {
+ case 1:
+ return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
+ case 2:
+ return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
+ case 4:
+ return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
+ case 8:
+ BUILD_BUG_ON(sizeof(unsigned long) != 8);
+ return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
+ }
+ BUILD_BUG();
+ return 0;
+}
+
+#define cmpxchg_local(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
+ (unsigned long)(old), (unsigned long)(new), \
+ sizeof(*(ptr)))); \
+})
+
+static __always_inline u64
+cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
+{
+ kasan_check_write(ptr, sizeof(*ptr));
+ return arch_cmpxchg64(ptr, old, new);
+}
+
+#define cmpxchg64(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
+ (u64)(new))); \
+})
+
+static __always_inline u64
+cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
+{
+ kasan_check_write(ptr, sizeof(*ptr));
+ return arch_cmpxchg64_local(ptr, old, new);
+}
+
+#define cmpxchg64_local(ptr, old, new) \
+({ \
+ ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
+ (u64)(new))); \
+})
+
+/*
+ * Originally we had the following code here:
+ * __typeof__(p1) ____p1 = (p1);
+ * kasan_check_write(____p1, 2 * sizeof(*____p1));
+ * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
+ * But it leads to compilation failures (see gcc issue 72873).
+ * So for now it's left non-instrumented.
+ * There are few callers of cmpxchg_double(), so it's not critical.
+ */
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+({ \
+ arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
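The header comment above spells out the contract: an architecture defines its primitives under arch_ names and includes this header last, which then provides the kasan-checked atomic_*() wrappers. A minimal sketch of the arch side, using a hypothetical arch/foo that is not part of this patch:

	/* arch/foo/include/asm/atomic.h -- hypothetical sketch */
	static __always_inline int arch_atomic_read(const atomic_t *v)
	{
		return READ_ONCE(v->counter);
	}

	static __always_inline void arch_atomic_set(atomic_t *v, int i)
	{
		WRITE_ONCE(v->counter, i);
	}

	/* ...remaining arch_atomic*(), arch_atomic64_*() and arch_cmpxchg*() here... */

	#include <asm-generic/atomic-instrumented.h>

Callers continue to use atomic_read()/atomic_set(); each wrapper performs a kasan_check_read() or kasan_check_write() on the atomic_t before forwarding to the arch_ implementation, and any arch operation built from other atomics must call the arch_ variants directly to avoid double instrumentation.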
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 3f38eb03649c..abe6dd9ca2a8 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -2,8 +2,6 @@
* Generic C implementation of atomic counter operations. Usable on
* UP systems only. Do not include in machine independent code.
*
- * Originally implemented for MN10300.
- *
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index fe297b599b0a..29458bbb2fa0 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -1,5 +1,5 @@
/*
- * Generic barrier definitions, originally based on MN10300 definitions.
+ * Generic barrier definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h
index 567766b0074a..32c0a216f576 100644
--- a/include/asm-generic/exec.h
+++ b/include/asm-generic/exec.h
@@ -1,4 +1,4 @@
-/* Generic process execution definitions, based on MN10300 definitions.
+/* Generic process execution definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index b4531e3b2120..66d1d45fa2e1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1,4 +1,4 @@
-/* Generic I/O port emulation, based on MN10300 code
+/* Generic I/O port emulation.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -25,6 +25,50 @@
#define mmiowb() do {} while (0)
#endif
+#ifndef __io_br
+#define __io_br() barrier()
+#endif
+
+/* prevent prefetching of coherent DMA data ahead of a dma-complete */
+#ifndef __io_ar
+#ifdef rmb
+#define __io_ar() rmb()
+#else
+#define __io_ar() barrier()
+#endif
+#endif
+
+/* flush writes to coherent DMA data before possibly triggering a DMA read */
+#ifndef __io_bw
+#ifdef wmb
+#define __io_bw() wmb()
+#else
+#define __io_bw() barrier()
+#endif
+#endif
+
+/* serialize device access against a spin_unlock, usually handled there. */
+#ifndef __io_aw
+#define __io_aw() barrier()
+#endif
+
+#ifndef __io_pbw
+#define __io_pbw() __io_bw()
+#endif
+
+#ifndef __io_paw
+#define __io_paw() __io_aw()
+#endif
+
+#ifndef __io_pbr
+#define __io_pbr() __io_br()
+#endif
+
+#ifndef __io_par
+#define __io_par() __io_ar()
+#endif
+
+
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
*
@@ -110,7 +154,12 @@ static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ __io_br();
+ val = __raw_readb(addr);
+ __io_ar();
+ return val;
}
#endif
@@ -118,7 +167,12 @@ static inline u8 readb(const volatile void __iomem *addr)
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ __io_br();
+ val = __le16_to_cpu(__raw_readw(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -126,7 +180,12 @@ static inline u16 readw(const volatile void __iomem *addr)
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ __io_br();
+ val = __le32_to_cpu(__raw_readl(addr));
+ __io_ar();
+ return val;
}
#endif
@@ -135,7 +194,12 @@ static inline u32 readl(const volatile void __iomem *addr)
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ __io_br();
+ val = __le64_to_cpu(__raw_readq(addr));
+ __io_ar();
+ return val;
}
#endif
#endif /* CONFIG_64BIT */
@@ -144,7 +208,9 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeb(value, addr);
+ __io_aw();
}
#endif
@@ -152,7 +218,9 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writew(cpu_to_le16(value), addr);
+ __io_aw();
}
#endif
@@ -160,7 +228,9 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writel(__cpu_to_le32(value), addr);
+ __io_aw();
}
#endif
@@ -169,7 +239,9 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ __io_bw();
__raw_writeq(__cpu_to_le64(value), addr);
+ __io_aw();
}
#endif
#endif /* CONFIG_64BIT */
@@ -180,35 +252,67 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
* accesses.
*/
#ifndef readb_relaxed
-#define readb_relaxed readb
+#define readb_relaxed readb_relaxed
+static inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
#endif
#ifndef readw_relaxed
-#define readw_relaxed readw
+#define readw_relaxed readw_relaxed
+static inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ return __le16_to_cpu(__raw_readw(addr));
+}
#endif
#ifndef readl_relaxed
-#define readl_relaxed readl
+#define readl_relaxed readl_relaxed
+static inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ return __le32_to_cpu(__raw_readl(addr));
+}
#endif
#if defined(readq) && !defined(readq_relaxed)
-#define readq_relaxed readq
+#define readq_relaxed readq_relaxed
+static inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ return __le64_to_cpu(__raw_readq(addr));
+}
#endif
#ifndef writeb_relaxed
-#define writeb_relaxed writeb
+#define writeb_relaxed writeb_relaxed
+static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ __raw_writeb(value, addr);
+}
#endif
#ifndef writew_relaxed
-#define writew_relaxed writew
+#define writew_relaxed writew_relaxed
+static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ __raw_writew(cpu_to_le16(value), addr);
+}
#endif
#ifndef writel_relaxed
-#define writel_relaxed writel
+#define writel_relaxed writel_relaxed
+static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ __raw_writel(__cpu_to_le32(value), addr);
+}
#endif
#if defined(writeq) && !defined(writeq_relaxed)
-#define writeq_relaxed writeq
+#define writeq_relaxed writeq_relaxed
+static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ __raw_writeq(__cpu_to_le64(value), addr);
+}
#endif
/*
@@ -351,6 +455,8 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define IO_SPACE_LIMIT 0xffff
#endif
+#include <linux/logic_pio.h>
+
/*
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
* implemented on hardware that needs an additional delay for I/O accesses to
@@ -361,7 +467,12 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define inb inb
static inline u8 inb(unsigned long addr)
{
- return readb(PCI_IOBASE + addr);
+ u8 val;
+
+ __io_pbr();
+ val = __raw_readb(PCI_IOBASE + addr);
+ __io_par();
+ return val;
}
#endif
@@ -369,7 +480,12 @@ static inline u8 inb(unsigned long addr)
#define inw inw
static inline u16 inw(unsigned long addr)
{
- return readw(PCI_IOBASE + addr);
+ u16 val;
+
+ __io_pbr();
+ val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -377,7 +493,12 @@ static inline u16 inw(unsigned long addr)
#define inl inl
static inline u32 inl(unsigned long addr)
{
- return readl(PCI_IOBASE + addr);
+ u32 val;
+
+ __io_pbr();
+ val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ __io_par();
+ return val;
}
#endif
@@ -385,7 +506,9 @@ static inline u32 inl(unsigned long addr)
#define outb outb
static inline void outb(u8 value, unsigned long addr)
{
- writeb(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writeb(value, PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -393,7 +516,9 @@ static inline void outb(u8 value, unsigned long addr)
#define outw outw
static inline void outw(u16 value, unsigned long addr)
{
- writew(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -401,7 +526,9 @@ static inline void outw(u16 value, unsigned long addr)
#define outl outl
static inline void outl(u32 value, unsigned long addr)
{
- writel(value, PCI_IOBASE + addr);
+ __io_pbw();
+ __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __io_paw();
}
#endif
@@ -852,7 +979,16 @@ static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
}
#endif
+#ifndef iounmap
+#define iounmap iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+}
+#endif
+#endif /* CONFIG_MMU */
#ifndef ioremap_nocache
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
#define ioremap_nocache ioremap_nocache
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
{
@@ -884,22 +1020,13 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
}
#endif
-#ifndef iounmap
-#define iounmap iounmap
-
-static inline void iounmap(void __iomem *addr)
-{
-}
-#endif
-#endif /* CONFIG_MMU */
-
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return PCI_IOBASE + (port & IO_SPACE_LIMIT);
+ return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
}
#endif
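The new __io_br()/__io_ar()/__io_bw()/__io_aw() hooks (and the __io_pbr()/__io_par()/__io_pbw()/__io_paw() port-I/O variants) default to rmb()/wmb() where available and to barrier() otherwise; readX()/writeX() and inX()/outX() now wrap the raw access in them. An architecture that needs different ordering overrides the hooks before including the generic header. A hypothetical sketch of such an override (arch/foo is not from this patch; the barriers chosen simply mirror the generic defaults):

	/* arch/foo/include/asm/io.h -- hypothetical sketch */
	#define __io_bw()	wmb()	/* flush stores to coherent DMA memory before an MMIO write */
	#define __io_ar()	rmb()	/* keep loads of DMA'd data after an MMIO read completes */
	/* __io_br() and __io_aw() are left to the default barrier() */

	#include <asm-generic/io.h>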
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
index 18c6abe81fbd..728e5c5706c4 100644
--- a/include/asm-generic/kvm_para.h
+++ b/include/asm-generic/kvm_para.h
@@ -19,6 +19,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline unsigned int kvm_arch_para_hints(void)
+{
+ return 0;
+}
+
static inline bool kvm_para_available(void)
{
return false;
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index 854f96ad5ccb..d4f16dcc2ed7 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/* Generic I/O port emulation, based on MN10300 code
+/* Generic I/O port emulation.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 8f22f55de17a..1a29b2a0282b 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -8,10 +8,11 @@
typedef struct { pgd_t pgd; } p4d_t;
-#define P4D_SHIFT PGDIR_SHIFT
-#define PTRS_PER_P4D 1
-#define P4D_SIZE (1UL << P4D_SHIFT)
-#define P4D_MASK (~(P4D_SIZE-1))
+#define P4D_SHIFT PGDIR_SHIFT
+#define MAX_PTRS_PER_P4D 1
+#define PTRS_PER_P4D 1
+#define P4D_SIZE (1UL << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE-1))
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2cfa3075d148..f59639afaa39 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -400,6 +400,42 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte)
+{
+
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_unmap_one() can save this
+ * metadata on a swap-out of a page.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t orig_pte)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif
@@ -983,6 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud);
+int pmd_free_pte_page(pmd_t *pmd);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1008,6 +1046,14 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
+static inline int pud_free_pmd_page(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd)
+{
+ return 0;
+}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
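The arch_do_swap_page() and arch_unmap_one() hooks added above stay as empty stubs unless an architecture defines __HAVE_ARCH_DO_SWAP_PAGE / __HAVE_ARCH_UNMAP_ONE and supplies its own versions; SPARC's ADI tags are the motivating user. A hypothetical shape of such an override (the real SPARC implementation is not part of this patch, and foo_restore_tags() is an invented helper):

	/* arch/foo/include/asm/pgtable.h -- hypothetical sketch */
	#define __HAVE_ARCH_DO_SWAP_PAGE
	static inline void arch_do_swap_page(struct mm_struct *mm,
					     struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t pte, pte_t oldpte)
	{
		/* restore the per-page metadata that arch_unmap_one() saved
		 * when this page was swapped out */
		foo_restore_tags(mm, vma, addr, pte);
	}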
diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h
index 052c4ac04fd5..986acc9d34bb 100644
--- a/include/asm-generic/switch_to.h
+++ b/include/asm-generic/switch_to.h
@@ -1,4 +1,4 @@
-/* Generic task switch macro wrapper, based on MN10300 definitions.
+/* Generic task switch macro wrapper.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1ab0e520d6fc..278841c75b97 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -178,6 +178,15 @@
#define TRACE_SYSCALLS()
#endif
+#ifdef CONFIG_BPF_EVENTS
+#define BPF_RAW_TP() STRUCT_ALIGN(); \
+ VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \
+ KEEP(*(__bpf_raw_tp_map)) \
+ VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .;
+#else
+#define BPF_RAW_TP()
+#endif
+
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__earlycon_table) = .; \
@@ -249,6 +258,7 @@
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
+ BPF_RAW_TP() \
TRACEPOINT_STR()
/*
@@ -589,7 +599,6 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
- ACPI_PROBE_TABLE(iort) \
EARLYCON_TABLE()
#define INIT_TEXT \