author    David Vrabel <david.vrabel@citrix.com>    2014-04-28 10:31:04 +0100
committer David Vrabel <david.vrabel@citrix.com>    2014-04-28 10:31:04 +0100
commit    67dadcb324c2fe059cb2c35f8b80df42bb23f7c4 (patch)
tree      3521a2a9e05a7de426ab50bc7685489ad6340e54 /arch/arm/include
parent    eb47f71200b7d5b4c8c1f8c75675f592d855aafd (diff)
parent    e26a9e00afc482b971afcaef1db8c9034d4d6d7c (diff)
Merge commit 'e26a9e0' into stable/for-linus-3.15
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/assembler.h       |  8
-rw-r--r--  arch/arm/include/asm/atomic.h          | 44
-rw-r--r--  arch/arm/include/asm/cmpxchg.h         |  6
-rw-r--r--  arch/arm/include/asm/cputype.h         |  1
-rw-r--r--  arch/arm/include/asm/floppy.h          |  2
-rw-r--r--  arch/arm/include/asm/futex.h           |  9
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h   |  1
-rw-r--r--  arch/arm/include/asm/hwcap.h           |  3
-rw-r--r--  arch/arm/include/asm/jump_label.h      |  1
-rw-r--r--  arch/arm/include/asm/memory.h          | 41
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h  |  1
-rw-r--r--  arch/arm/include/asm/pgtable.h         |  7
-rw-r--r--  arch/arm/include/asm/sync_bitops.h     |  1
-rw-r--r--  arch/arm/include/asm/system.h          |  7
-rw-r--r--  arch/arm/include/asm/uaccess.h         |  2
-rw-r--r--  arch/arm/include/uapi/asm/hwcap.h      |  9
16 files changed, 98 insertions(+), 45 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c2285160575..380ac4f20000 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -30,8 +30,8 @@
* Endian independent macros for shifting bytes within registers.
*/
#ifndef __ARMEB__
-#define pull lsr
-#define push lsl
+#define lspull lsr
+#define lspush lsl
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
@@ -41,8 +41,8 @@
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
-#define pull lsl
-#define push lsr
+#define lspull lsl
+#define lspush lsr
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
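
[Editor's note: the pull/push macros are renamed to lspull/lspush, presumably to keep these byte-lane shift helpers from colliding with the assembler's own push mnemonic; the diff itself does not state the rationale. The get_byte_N/put_byte_N macros encode which shift extracts or places byte N of a loaded word on each endianness. A minimal C restatement of the little-endian case, with a hypothetical function name:]

#include <stdint.h>

/* Little-endian: byte n of word w sits at bit position 8*n, matching the
 * lsl #0 / lsr #8 / lsr #16 / lsr #24 shifts defined above. */
static inline uint8_t get_byte_n(uint32_t w, unsigned int n)
{
	return (uint8_t)(w >> (8 * n));
}
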
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b069..9a92fd7864a8 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long res;
smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int oldval, newval;
+ unsigned long tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1: ldrex %0, [%4]\n"
+" teq %0, %5\n"
+" beq 2f\n"
+" add %1, %0, %6\n"
+" strex %2, %1, [%4]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+
+ if (oldval != u)
+ smp_mb();
+
+ return oldval;
+}
+
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
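
[Editor's note: the new LL/SC __atomic_add_unless() replaces the generic cmpxchg loop on ARMv6+. Its semantics, restated as a C-level sketch mirroring the cmpxchg-based version kept for pre-v6 builds below; the _sketch suffix is ours:]

/* Add a to *v unless *v == u; return the value observed before the add.
 * atomic_inc_not_zero(v) is built on this as __atomic_add_unless(v, 1, 0). */
static inline int __atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)
			break;
		c = old;
	}
	return c;
}

[Note the barrier placement in the LL/SC routine above: the trailing smp_mb() runs only when the add actually happened (oldval != u).]
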
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
unsigned long res;
smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
unsigned long tmp;
smp_mb();
+ prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
int ret = 1;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba7efc8..abb2c3769b01 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
#define __ASM_ARM_CMPXCHG_H
#include <linux/irqflags.h>
+#include <linux/prefetch.h>
#include <asm/barrier.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#endif
smp_mb();
+ prefetchw((const void *)ptr);
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
{
unsigned long oldval, res;
+ prefetchw((const void *)ptr);
+
switch (size) {
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
unsigned long long oldval;
unsigned long res;
+ prefetchw(ptr);
+
__asm__ __volatile__(
"1: ldrexd %1, %H1, [%3]\n"
" teq %1, %4\n"
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index acdde76b39bb..42f0889f0584 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -71,6 +71,7 @@
#define ARM_CPU_PART_CORTEX_A5 0xC050
#define ARM_CPU_PART_CORTEX_A15 0xC0F0
#define ARM_CPU_PART_CORTEX_A7 0xC070
+#define ARM_CPU_PART_CORTEX_A12 0xC0D0
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d8..f4882553fbb0 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
#define fd_inb(port) inb((port))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
- IRQF_DISABLED,"floppy",NULL)
+ 0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..53e69dae796f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@
#ifdef __KERNEL__
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -28,6 +23,7 @@
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
+ prefetchw(uaddr); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
smp_mb();
+ /* Prefetching cannot fault */
+ prefetchw(uaddr);
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index eef55ea9ef00..8e427c7b4425 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
+#define ARM_DEBUG_ARCH_V8 6
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1f..6e183fd269fb 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -9,6 +9,7 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2 (elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
#endif
#endif
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 863c892b4aaa..70f9b9bfb1f9 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm/system.h>
#define JUMP_LABEL_NOP_SIZE 4
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4afb376d9c7c..02fa2558f662 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -166,9 +166,17 @@
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
*/
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
@@ -177,12 +185,17 @@
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_7_0 0x81
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;
-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
@@ -243,6 +256,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#else
#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
@@ -254,18 +268,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
return x - PHYS_OFFSET + PAGE_OFFSET;
}
-#endif
-#endif
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#endif
/*
* These are *only* valid on the kernel direct mapped RAM memory.
@@ -343,9 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
*/
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
- && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+ && pfn_valid(virt_to_pfn(kaddr)))
#endif
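
[Editor's note: storing the physical start as a page-frame number (__pv_phys_pfn_offset, an unsigned long) rather than a u64 byte address keeps the runtime-patched conversion within native word size even when LPAE places RAM above 4GB, and virt_to_pfn() can then avoid a 64-bit __pa() round trip. A worked example with hypothetical values:]

/* Assume PAGE_OFFSET = 0xC0000000, PHYS_OFFSET = 0x80000000, PAGE_SHIFT = 12,
 * so PHYS_PFN_OFFSET = 0x80000000 >> 12 = 0x80000.
 *
 * virt_to_pfn(0xC0001000) = ((0xC0001000 - 0xC0000000) >> 12) + 0x80000
 *                         = 0x1 + 0x80000 = 0x80001
 * which is the page at physical address 0x80001000, as expected.
 */
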
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index dfff709fda3c..219ac88a9542 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 7d59b524f2af..5478e5d6ad89 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte) (0)
-#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte) \
+ (pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
unsigned long ext = 0;
- if (addr < TASK_SIZE && pte_present_user(pteval)) {
+ if (addr < TASK_SIZE && pte_valid_user(pteval)) {
__sync_icache_dcache(pteval);
ext |= PTE_EXT_NG;
}
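
[Editor's note: the new pte_accessible() separates "valid" (allowed to be cached in the TLB, L_PTE_VALID) from "present": while a deferred TLB flush is pending on the mm, stale translations may still be live, so any present pte must be treated as accessible. Restated as a function for clarity; the kernel keeps it as a macro:]

static inline bool pte_accessible_sketch(struct mm_struct *mm, pte_t pte)
{
	if (mm_tlb_flush_pending(mm))
		return pte_present(pte);	/* a stale TLB entry may still exist */
	return pte_valid(pte);			/* otherwise only valid ptes can be cached */
}
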
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf76..9732b8e11e63 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -2,7 +2,6 @@
#define __ASM_SYNC_BITOPS_H__
#include <asm/bitops.h>
-#include <asm/system.h>
/* sync_bitops functions are equivalent to the SMP implementation of the
* original functions, independently from CONFIG_SMP being defined.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1c..000000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 72abdc541f38..12c3a5decc60 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,7 +19,7 @@
#include <asm/unified.h>
#include <asm/compiler.h>
-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 7dcc10d67253..20d12f230a2f 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -28,4 +28,13 @@
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM (1 << 21)
+/*
+ * HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
+ */
+#define HWCAP2_AES (1 << 0)
+#define HWCAP2_PMULL (1 << 1)
+#define HWCAP2_SHA1 (1 << 2)
+#define HWCAP2_SHA2 (1 << 3)
+#define HWCAP2_CRC32 (1 << 4)
+
#endif /* _UAPI__ASMARM_HWCAP_H */
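
[Editor's note: userspace reaches these bits through the AT_HWCAP2 auxiliary-vector entry that backs ELF_HWCAP2/elf_hwcap2. A minimal probe, assuming a glibc new enough to provide getauxval() (2.16+) and the AT_HWCAP2 constant:]

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("AES:   %s\n", (hwcap2 & HWCAP2_AES)   ? "yes" : "no");
	printf("PMULL: %s\n", (hwcap2 & HWCAP2_PMULL) ? "yes" : "no");
	printf("SHA1:  %s\n", (hwcap2 & HWCAP2_SHA1)  ? "yes" : "no");
	printf("SHA2:  %s\n", (hwcap2 & HWCAP2_SHA2)  ? "yes" : "no");
	printf("CRC32: %s\n", (hwcap2 & HWCAP2_CRC32) ? "yes" : "no");
	return 0;
}
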