Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/arch_timer.h            | 109
-rw-r--r--  arch/arm/include/asm/assembler.h             |  10
-rw-r--r--  arch/arm/include/asm/cputype.h               |  33
-rw-r--r--  arch/arm/include/asm/cti.h                   |  10
-rw-r--r--  arch/arm/include/asm/delay.h                 |   1
-rw-r--r--  arch/arm/include/asm/device.h                |   6
-rw-r--r--  arch/arm/include/asm/dma-iommu.h             |   2
-rw-r--r--  arch/arm/include/asm/dma.h                   |   2
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h    |   6
-rw-r--r--  arch/arm/include/asm/hardware/gic.h          |  57
-rw-r--r--  arch/arm/include/asm/hardware/pl080.h        | 146
-rw-r--r--  arch/arm/include/asm/hardware/sp810.h        |  64
-rw-r--r--  arch/arm/include/asm/hardware/vic.h          |  57
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h         |   3
-rw-r--r--  arch/arm/include/asm/idmap.h                 |   1
-rw-r--r--  arch/arm/include/asm/kvm_arch_timer.h        |  85
-rw-r--r--  arch/arm/include/asm/kvm_arm.h               | 214
-rw-r--r--  arch/arm/include/asm/kvm_asm.h               |  83
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h            |  47
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h           |  72
-rw-r--r--  arch/arm/include/asm/kvm_host.h              | 184
-rw-r--r--  arch/arm/include/asm/kvm_mmio.h              |  56
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h               |  50
-rw-r--r--  arch/arm/include/asm/kvm_psci.h              |  23
-rw-r--r--  arch/arm/include/asm/kvm_vgic.h              | 221
-rw-r--r--  arch/arm/include/asm/mach/arch.h             |   3
-rw-r--r--  arch/arm/include/asm/mach/irq.h              |   1
-rw-r--r--  arch/arm/include/asm/mach/pci.h              |   1
-rw-r--r--  arch/arm/include/asm/mach/time.h             |  30
-rw-r--r--  arch/arm/include/asm/memory.h                |  12
-rw-r--r--  arch/arm/include/asm/mmu.h                   |   8
-rw-r--r--  arch/arm/include/asm/mmu_context.h           |   2
-rw-r--r--  arch/arm/include/asm/opcodes-sec.h           |  24
-rw-r--r--  arch/arm/include/asm/opcodes.h               |   1
-rw-r--r--  arch/arm/include/asm/outercache.h            |   1
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h  |   5
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h        |  18
-rw-r--r--  arch/arm/include/asm/pgtable.h               |  10
-rw-r--r--  arch/arm/include/asm/psci.h                  |  36
-rw-r--r--  arch/arm/include/asm/signal.h                |  18
-rw-r--r--  arch/arm/include/asm/smp_scu.h               |  25
-rw-r--r--  arch/arm/include/asm/spinlock.h              |  16
-rw-r--r--  arch/arm/include/asm/tlbflush.h              |  34
-rw-r--r--  arch/arm/include/asm/unistd.h                |   2
-rw-r--r--  arch/arm/include/asm/virt.h                  |   4
-rw-r--r--  arch/arm/include/asm/xen/events.h            |   5
-rw-r--r--  arch/arm/include/asm/xen/page.h              |   4
47 files changed, 1368 insertions(+), 434 deletions(-)
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index d40229d9a1c9..7ade91d8cc6f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,13 +1,115 @@
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H
+#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <clocksource/arm_arch_timer.h>
#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_of_register(void);
int arch_timer_sched_clock_init(void);
-struct timecounter *arch_timer_get_timecounter(void);
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+{
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+ break;
+ }
+ }
+
+ if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+ break;
+ }
+ }
+
+ isb();
+}
+
+static inline u32 arch_timer_reg_read(const int access, const int reg)
+{
+ u32 val = 0;
+
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+ break;
+ }
+ }
+
+ if (access == ARCH_TIMER_VIRT_ACCESS) {
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
+ break;
+ }
+ }
+
+ return val;
+}
+
+static inline u32 arch_timer_get_cntfrq(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline u64 arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
+static inline u64 arch_counter_get_cntvct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
+static inline void __cpuinit arch_counter_set_user_access(void)
+{
+ u32 cntkctl;
+
+ asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+
+ /* disable user access to everything */
+ cntkctl &= ~((3 << 8) | (7 << 0));
+
+ asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
+}
#else
static inline int arch_timer_of_register(void)
{
@@ -18,11 +120,6 @@ static inline int arch_timer_sched_clock_init(void)
{
return -ENXIO;
}
-
-static inline struct timecounter *arch_timer_get_timecounter(void)
-{
- return NULL;
-}
#endif
#endif
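
For context only (not part of the patch): a clockevent handler built on the new accessors might look roughly like the sketch below. It assumes the ARCH_TIMER_*_ACCESS and ARCH_TIMER_REG_* constants come from the new <clocksource/arm_arch_timer.h> header; the ARCH_TIMER_CTRL_* bit names are assumptions made for the example.

/* Illustrative sketch only -- not from the patch. */
static int arch_timer_set_next_event_virt(unsigned long evt)
{
	u32 ctrl = arch_timer_reg_read(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_CTRL);

	ctrl |= ARCH_TIMER_CTRL_ENABLE;		/* assumed bit name */
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;	/* assumed bit name: unmask the timer interrupt */

	/* Program the downcounter first, then (re)enable the timer. */
	arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_CTRL, ctrl);

	return 0;
}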
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200aa4b5..05ee9eebad6b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -246,18 +246,14 @@
*
* This macro is intended for forcing the CPU into SVC mode at boot time.
* you cannot return to the original mode.
- *
- * Beware, it also clobers LR.
*/
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
mrs \reg , cpsr
- mov lr , \reg
- and lr , lr , #MODE_MASK
- cmp lr , #HYP_MODE
- orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT
+ eor \reg, \reg, #HYP_MODE
+ tst \reg, #MODE_MASK
bic \reg , \reg , #MODE_MASK
- orr \reg , \reg , #SVC_MODE
+ orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB( orr \reg , \reg , #PSR_T_BIT )
bne 1f
orr \reg, \reg, #PSR_A_BIT
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index a59dcb5ab5fc..ad41ec2471e8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -64,6 +64,24 @@ extern unsigned int processor_id;
#define read_cpuid_ext(reg) 0
#endif
+#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_INTEL 0x69
+
+#define ARM_CPU_PART_ARM1136 0xB360
+#define ARM_CPU_PART_ARM1156 0xB560
+#define ARM_CPU_PART_ARM1176 0xB760
+#define ARM_CPU_PART_ARM11MPCORE 0xB020
+#define ARM_CPU_PART_CORTEX_A8 0xC080
+#define ARM_CPU_PART_CORTEX_A9 0xC090
+#define ARM_CPU_PART_CORTEX_A5 0xC050
+#define ARM_CPU_PART_CORTEX_A15 0xC0F0
+#define ARM_CPU_PART_CORTEX_A7 0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
+#define ARM_CPU_XSCALE_ARCH_V1 0x2000
+#define ARM_CPU_XSCALE_ARCH_V2 0x4000
+#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
@@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+ return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+ return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+ return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CPUID_CACHETYPE);
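
As an illustrative aside (not part of the patch), the new implementor/part-number helpers are intended to replace open-coded MIDR masking; a check for a specific core could be written like this, with the helper name invented for the example:

/* Illustrative sketch only -- detect a Cortex-A15 with the new helpers. */
static inline bool cpu_is_cortex_a15(void)
{
	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
	       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
}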
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index f2e5cad3f306..2381199acb7d 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -2,6 +2,7 @@
#define __ASMARM_CTI_H
#include <asm/io.h>
+#include <asm/hardware/coresight.h>
/* The registers' definition is from section 3.2 of
* Embedded Cross Trigger Revision: r0p0
@@ -35,11 +36,6 @@
#define LOCKACCESS 0xFB0
#define LOCKSTATUS 0xFB4
-/* write this value to LOCKACCESS will unlock the module, and
- * other value will lock the module
- */
-#define LOCKCODE 0xC5ACCE55
-
/**
* struct cti - cross trigger interface struct
* @base: mapped virtual address for the cti base
@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
*/
static inline void cti_unlock(struct cti *cti)
{
- __raw_writel(LOCKCODE, cti->base + LOCKACCESS);
+ __raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
}
/**
@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
*/
static inline void cti_lock(struct cti *cti)
{
- __raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
+ __raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
}
#endif
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index ab98fdd083bd..720799fd3a81 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -24,6 +24,7 @@ extern struct arm_delay_ops {
void (*delay)(unsigned long);
void (*const_udelay)(unsigned long);
void (*udelay)(unsigned long);
+ bool const_clock;
} arm_delay_ops;
#define __delay(n) arm_delay_ops.delay(n)
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index b69c0d3285f8..dc662fca9230 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -27,4 +27,10 @@ struct pdev_archdata {
#endif
};
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 799b09409fad..a8c56acc8c98 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,6 +7,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
+#include <linux/kref.h>
struct dma_iommu_mapping {
/* iommu specific data */
@@ -29,6 +30,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 5694a0d6576b..58b8c6a0ab1f 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -105,7 +105,7 @@ extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
*/
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr) \
- __set_dma_addr(chan, bus_to_virt(addr))
+ __set_dma_addr(chan, (void *)__bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5a..0cf7a6b842ff 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -36,7 +36,7 @@
/* CoreSight Component Registers */
#define CSCR_CLASS 0xff4
-#define UNLOCK_MAGIC 0xc5acce55
+#define CS_LAR_KEY 0xc5acce55
/* ETM control register, "ETM Architecture", 3.3.1 */
#define ETMR_CTRL 0
@@ -147,11 +147,11 @@
#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
- do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+ do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etb_unlock(t) \
- do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+ do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
#endif /* __ASM_HARDWARE_CORESIGHT_H */
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
deleted file mode 100644
index 4b1ce6cd477f..000000000000
--- a/arch/arm/include/asm/hardware/gic.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/gic.h
- *
- * Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_HARDWARE_GIC_H
-#define __ASM_ARM_HARDWARE_GIC_H
-
-#include <linux/compiler.h>
-
-#define GIC_CPU_CTRL 0x00
-#define GIC_CPU_PRIMASK 0x04
-#define GIC_CPU_BINPOINT 0x08
-#define GIC_CPU_INTACK 0x0c
-#define GIC_CPU_EOI 0x10
-#define GIC_CPU_RUNNINGPRI 0x14
-#define GIC_CPU_HIGHPRI 0x18
-
-#define GIC_DIST_CTRL 0x000
-#define GIC_DIST_CTR 0x004
-#define GIC_DIST_ENABLE_SET 0x100
-#define GIC_DIST_ENABLE_CLEAR 0x180
-#define GIC_DIST_PENDING_SET 0x200
-#define GIC_DIST_PENDING_CLEAR 0x280
-#define GIC_DIST_ACTIVE_BIT 0x300
-#define GIC_DIST_PRI 0x400
-#define GIC_DIST_TARGET 0x800
-#define GIC_DIST_CONFIG 0xc00
-#define GIC_DIST_SOFTINT 0xf00
-
-#ifndef __ASSEMBLY__
-#include <linux/irqdomain.h>
-struct device_node;
-
-extern struct irq_chip gic_arch_extn;
-
-void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
- u32 offset, struct device_node *);
-int gic_of_init(struct device_node *node, struct device_node *parent);
-void gic_secondary_init(unsigned int);
-void gic_handle_irq(struct pt_regs *regs);
-void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
-
-static inline void gic_init(unsigned int nr, int start,
- void __iomem *dist , void __iomem *cpu)
-{
- gic_init_bases(nr, start, dist, cpu, 0, NULL);
-}
-
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
deleted file mode 100644
index 4eea2107214b..000000000000
--- a/arch/arm/include/asm/hardware/pl080.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* arch/arm/include/asm/hardware/pl080.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * ARM PrimeCell PL080 DMA controller
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Note, there are some Samsung updates to this controller block which
- * make it not entierly compatible with the PL080 specification from
- * ARM. When in doubt, check the Samsung documentation first.
- *
- * The Samsung defines are PL080S, and add an extra control register,
- * the ability to move more than 2^11 counts of data and some extra
- * OneNAND features.
-*/
-
-#ifndef ASM_PL080_H
-#define ASM_PL080_H
-
-#define PL080_INT_STATUS (0x00)
-#define PL080_TC_STATUS (0x04)
-#define PL080_TC_CLEAR (0x08)
-#define PL080_ERR_STATUS (0x0C)
-#define PL080_ERR_CLEAR (0x10)
-#define PL080_RAW_TC_STATUS (0x14)
-#define PL080_RAW_ERR_STATUS (0x18)
-#define PL080_EN_CHAN (0x1c)
-#define PL080_SOFT_BREQ (0x20)
-#define PL080_SOFT_SREQ (0x24)
-#define PL080_SOFT_LBREQ (0x28)
-#define PL080_SOFT_LSREQ (0x2C)
-
-#define PL080_CONFIG (0x30)
-#define PL080_CONFIG_M2_BE (1 << 2)
-#define PL080_CONFIG_M1_BE (1 << 1)
-#define PL080_CONFIG_ENABLE (1 << 0)
-
-#define PL080_SYNC (0x34)
-
-/* Per channel configuration registers */
-
-#define PL080_Cx_STRIDE (0x20)
-#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20)))
-#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20)))
-#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20)))
-#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20)))
-
-#define PL080_CH_SRC_ADDR (0x00)
-#define PL080_CH_DST_ADDR (0x04)
-#define PL080_CH_LLI (0x08)
-#define PL080_CH_CONTROL (0x0C)
-#define PL080_CH_CONFIG (0x10)
-#define PL080S_CH_CONTROL2 (0x10)
-#define PL080S_CH_CONFIG (0x14)
-
-#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
-#define PL080_LLI_ADDR_SHIFT (2)
-#define PL080_LLI_LM_AHB2 (1 << 0)
-
-#define PL080_CONTROL_TC_IRQ_EN (1 << 31)
-#define PL080_CONTROL_PROT_MASK (0x7 << 28)
-#define PL080_CONTROL_PROT_SHIFT (28)
-#define PL080_CONTROL_PROT_CACHE (1 << 30)
-#define PL080_CONTROL_PROT_BUFF (1 << 29)
-#define PL080_CONTROL_PROT_SYS (1 << 28)
-#define PL080_CONTROL_DST_INCR (1 << 27)
-#define PL080_CONTROL_SRC_INCR (1 << 26)
-#define PL080_CONTROL_DST_AHB2 (1 << 25)
-#define PL080_CONTROL_SRC_AHB2 (1 << 24)
-#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
-#define PL080_CONTROL_DWIDTH_SHIFT (21)
-#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
-#define PL080_CONTROL_SWIDTH_SHIFT (18)
-#define PL080_CONTROL_DB_SIZE_MASK (0x7 << 15)
-#define PL080_CONTROL_DB_SIZE_SHIFT (15)
-#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12)
-#define PL080_CONTROL_SB_SIZE_SHIFT (12)
-#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0)
-#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0)
-
-#define PL080_BSIZE_1 (0x0)
-#define PL080_BSIZE_4 (0x1)
-#define PL080_BSIZE_8 (0x2)
-#define PL080_BSIZE_16 (0x3)
-#define PL080_BSIZE_32 (0x4)
-#define PL080_BSIZE_64 (0x5)
-#define PL080_BSIZE_128 (0x6)
-#define PL080_BSIZE_256 (0x7)
-
-#define PL080_WIDTH_8BIT (0x0)
-#define PL080_WIDTH_16BIT (0x1)
-#define PL080_WIDTH_32BIT (0x2)
-
-#define PL080N_CONFIG_ITPROT (1 << 20)
-#define PL080N_CONFIG_SECPROT (1 << 19)
-#define PL080_CONFIG_HALT (1 << 18)
-#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
-#define PL080_CONFIG_LOCK (1 << 16)
-#define PL080_CONFIG_TC_IRQ_MASK (1 << 15)
-#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14)
-#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
-#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
-#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
-#define PL080_CONFIG_DST_SEL_SHIFT (6)
-#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
-#define PL080_CONFIG_SRC_SEL_SHIFT (1)
-#define PL080_CONFIG_ENABLE (1 << 0)
-
-#define PL080_FLOW_MEM2MEM (0x0)
-#define PL080_FLOW_MEM2PER (0x1)
-#define PL080_FLOW_PER2MEM (0x2)
-#define PL080_FLOW_SRC2DST (0x3)
-#define PL080_FLOW_SRC2DST_DST (0x4)
-#define PL080_FLOW_MEM2PER_PER (0x5)
-#define PL080_FLOW_PER2MEM_PER (0x6)
-#define PL080_FLOW_SRC2DST_SRC (0x7)
-
-/* DMA linked list chain structure */
-
-struct pl080_lli {
- u32 src_addr;
- u32 dst_addr;
- u32 next_lli;
- u32 control0;
-};
-
-struct pl080s_lli {
- u32 src_addr;
- u32 dst_addr;
- u32 next_lli;
- u32 control0;
- u32 control1;
-};
-
-#endif /* ASM_PL080_H */
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
deleted file mode 100644
index 6636430dd0e6..000000000000
--- a/arch/arm/include/asm/hardware/sp810.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/sp810.h
- *
- * ARM PrimeXsys System Controller SP810 header file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARM_SP810_H
-#define __ASM_ARM_SP810_H
-
-#include <linux/io.h>
-
-/* sysctl registers offset */
-#define SCCTRL 0x000
-#define SCSYSSTAT 0x004
-#define SCIMCTRL 0x008
-#define SCIMSTAT 0x00C
-#define SCXTALCTRL 0x010
-#define SCPLLCTRL 0x014
-#define SCPLLFCTRL 0x018
-#define SCPERCTRL0 0x01C
-#define SCPERCTRL1 0x020
-#define SCPEREN 0x024
-#define SCPERDIS 0x028
-#define SCPERCLKEN 0x02C
-#define SCPERSTAT 0x030
-#define SCSYSID0 0xEE0
-#define SCSYSID1 0xEE4
-#define SCSYSID2 0xEE8
-#define SCSYSID3 0xEEC
-#define SCITCR 0xF00
-#define SCITIR0 0xF04
-#define SCITIR1 0xF08
-#define SCITOR 0xF0C
-#define SCCNTCTRL 0xF10
-#define SCCNTDATA 0xF14
-#define SCCNTSTEP 0xF18
-#define SCPERIPHID0 0xFE0
-#define SCPERIPHID1 0xFE4
-#define SCPERIPHID2 0xFE8
-#define SCPERIPHID3 0xFEC
-#define SCPCELLID0 0xFF0
-#define SCPCELLID1 0xFF4
-#define SCPCELLID2 0xFF8
-#define SCPCELLID3 0xFFC
-
-#define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2))
-
-static inline void sysctl_soft_reset(void __iomem *base)
-{
- /* switch to slow mode */
- writel(0x2, base + SCCTRL);
-
- /* writing any value to SCSYSSTAT reg will reset system */
- writel(0, base + SCSYSSTAT);
-}
-
-#endif /* __ASM_ARM_SP810_H */
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
deleted file mode 100644
index 2bebad36fc83..000000000000
--- a/arch/arm/include/asm/hardware/vic.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/vic.h
- *
- * Copyright (c) ARM Limited 2003. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_HARDWARE_VIC_H
-#define __ASM_ARM_HARDWARE_VIC_H
-
-#define VIC_IRQ_STATUS 0x00
-#define VIC_FIQ_STATUS 0x04
-#define VIC_RAW_STATUS 0x08
-#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
-#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
-#define VIC_INT_ENABLE_CLEAR 0x14
-#define VIC_INT_SOFT 0x18
-#define VIC_INT_SOFT_CLEAR 0x1c
-#define VIC_PROTECT 0x20
-#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */
-#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */
-
-#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */
-#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */
-#define VIC_ITCR 0x300 /* VIC test control register */
-
-#define VIC_VECT_CNTL_ENABLE (1 << 5)
-
-#define VIC_PL192_VECT_ADDR 0xF00
-
-#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
-#include <linux/types.h>
-
-struct device_node;
-struct pt_regs;
-
-void __vic_init(void __iomem *base, int irq_start, u32 vic_sources,
- u32 resume_sources, struct device_node *node);
-void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-int vic_of_init(struct device_node *node, struct device_node *parent);
-void vic_handle_irq(struct pt_regs *regs);
-
-#endif /* __ASSEMBLY__ */
-#endif
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 01169dd723f1..eef55ea9ef00 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_HDBGEN (1 << 14)
#define ARM_DSCR_MDBGEN (1 << 15)
+/* OSLSR os lock model bits */
+#define ARM_OSLSR_OSLM0 (1 << 0)
+
/* opcode2 numbers for the co-processor instructions. */
#define ARM_OP2_BVR 4
#define ARM_OP2_BCR 5
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863edb517d..1a66f907e5cc 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -8,6 +8,7 @@
#define __idmap __section(.idmap.text) noinline notrace
extern pgd_t *idmap_pgd;
+extern pgd_t *hyp_pgd;
void setup_mm_for_reboot(void);
diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/arch/arm/include/asm/kvm_arch_timer.h
new file mode 100644
index 000000000000..68cb9e1dfb81
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arch_timer.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
+#define __ASM_ARM_KVM_ARCH_TIMER_H
+
+#include <linux/clocksource.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+
+struct arch_timer_kvm {
+#ifdef CONFIG_KVM_ARM_TIMER
+ /* Is the timer enabled */
+ bool enabled;
+
+ /* Virtual offset */
+ cycle_t cntvoff;
+#endif
+};
+
+struct arch_timer_cpu {
+#ifdef CONFIG_KVM_ARM_TIMER
+ /* Registers: control register, timer value */
+ u32 cntv_ctl; /* Saved/restored */
+ cycle_t cntv_cval; /* Saved/restored */
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+
+ /* Background timer used when the guest is not running */
+ struct hrtimer timer;
+
+ /* Work queued when the above timer expires */
+ struct work_struct expired;
+
+ /* Background timer active */
+ bool armed;
+
+ /* Timer IRQ */
+ const struct kvm_irq_level *irq;
+#endif
+};
+
+#ifdef CONFIG_KVM_ARM_TIMER
+int kvm_timer_hyp_init(void);
+int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_timer_hyp_init(void)
+{
+ return 0;
+};
+
+static inline int kvm_timer_init(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..7c3d813e15df
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ARM_H__
+#define __ARM_KVM_ARM_H__
+
+#include <linux/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE (1 << 27)
+#define HCR_TVM (1 << 26)
+#define HCR_TTLB (1 << 25)
+#define HCR_TPU (1 << 24)
+#define HCR_TPC (1 << 23)
+#define HCR_TSW (1 << 22)
+#define HCR_TAC (1 << 21)
+#define HCR_TIDCP (1 << 20)
+#define HCR_TSC (1 << 19)
+#define HCR_TID3 (1 << 18)
+#define HCR_TID2 (1 << 17)
+#define HCR_TID1 (1 << 16)
+#define HCR_TID0 (1 << 15)
+#define HCR_TWE (1 << 14)
+#define HCR_TWI (1 << 13)
+#define HCR_DC (1 << 12)
+#define HCR_BSU (3 << 10)
+#define HCR_BSU_IS (1 << 10)
+#define HCR_FB (1 << 9)
+#define HCR_VA (1 << 8)
+#define HCR_VI (1 << 7)
+#define HCR_VF (1 << 6)
+#define HCR_AMO (1 << 5)
+#define HCR_IMO (1 << 4)
+#define HCR_FMO (1 << 3)
+#define HCR_PTW (1 << 2)
+#define HCR_SWIO (1 << 1)
+#define HCR_VM 1
+
+/*
+ * The bits we set in HCR:
+ * TAC: Trap ACTLR
+ * TSC: Trap SMC
+ * TSW: Trap cache operations by set/way
+ * TWI: Trap WFI
+ * TIDCP: Trap L2CTLR/L2ECTLR
+ * BSU_IS: Upgrade barriers to the inner shareable domain
+ * FB: Force broadcast of all maintenance operations
+ * AMO: Override CPSR.A and enable signaling with VA
+ * IMO: Override CPSR.I and enable signaling with VI
+ * FMO: Override CPSR.F and enable signaling with VF
+ * SWIO: Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+ HCR_SWIO | HCR_TIDCP)
+#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+
+/* System Control Register (SCTLR) bits */
+#define SCTLR_TE (1 << 30)
+#define SCTLR_EE (1 << 25)
+#define SCTLR_V (1 << 13)
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE (1 << 30)
+#define HSCTLR_EE (1 << 25)
+#define HSCTLR_FI (1 << 21)
+#define HSCTLR_WXN (1 << 19)
+#define HSCTLR_I (1 << 12)
+#define HSCTLR_C (1 << 2)
+#define HSCTLR_A (1 << 1)
+#define HSCTLR_M 1
+#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
+ HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1 (3 << 28)
+#define TTBCR_ORGN1 (3 << 26)
+#define TTBCR_IRGN1 (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ (3 << 16)
+#define TTBCR_SH0 (3 << 12)
+#define TTBCR_ORGN0 (3 << 10)
+#define TTBCR_IRGN0 (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ 3
+#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+/* Hyp System Trap Register */
+#define HSTR_T(x) (1 << x)
+#define HSTR_TTEE (1 << 16)
+#define HSTR_TJDBX (1 << 17)
+
+/* Hyp Coprocessor Trap Register */
+#define HCPTR_TCP(x) (1 << x)
+#define HCPTR_TCP_MASK (0x3fff)
+#define HCPTR_TASE (1 << 15)
+#define HCPTR_TTA (1 << 20)
+#define HCPTR_TCPAC (1 << 31)
+
+/* Hyp Debug Configuration Register bits */
+#define HDCR_TDRA (1 << 11)
+#define HDCR_TDOSA (1 << 10)
+#define HDCR_TDA (1 << 9)
+#define HDCR_TDE (1 << 8)
+#define HDCR_HPME (1 << 7)
+#define HDCR_TPM (1 << 6)
+#define HDCR_TPMCR (1 << 5)
+#define HDCR_HPMN_MASK (0x1F)
+
+/*
+ * The architecture supports 40-bit IPA as input to the 2nd stage translations
+ * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
+ * space.
+ */
+#define KVM_PHYS_SHIFT (40)
+#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
+#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
+#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define S2_PGD_SIZE (1 << S2_PGD_ORDER)
+
+/* Virtualization Translation Control Register (VTCR) bits */
+#define VTCR_SH0 (3 << 12)
+#define VTCR_ORGN0 (3 << 10)
+#define VTCR_IRGN0 (3 << 8)
+#define VTCR_SL0 (3 << 6)
+#define VTCR_S (1 << 4)
+#define VTCR_T0SZ (0xf)
+#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
+ VTCR_S | VTCR_T0SZ)
+#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
+#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
+#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
+#define KVM_VTCR_SL0 VTCR_SL_L1
+/* stage-2 input address range defined as 2^(32-T0SZ) */
+#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
+#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
+#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
+
+/* Virtualization Translation Table Base Register (VTTBR) bits */
+#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
+#define VTTBR_X (14 - KVM_T0SZ)
+#else
+#define VTTBR_X (5 - KVM_T0SZ)
+#endif
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT (48LLU)
+#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
+
+/* Hyp Syndrome Register (HSR) bits */
+#define HSR_EC_SHIFT (26)
+#define HSR_EC (0x3fU << HSR_EC_SHIFT)
+#define HSR_IL (1U << 25)
+#define HSR_ISS (HSR_IL - 1)
+#define HSR_ISV_SHIFT (24)
+#define HSR_ISV (1U << HSR_ISV_SHIFT)
+#define HSR_SRT_SHIFT (16)
+#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
+#define HSR_FSC (0x3f)
+#define HSR_FSC_TYPE (0x3c)
+#define HSR_SSE (1 << 21)
+#define HSR_WNR (1 << 6)
+#define HSR_CV_SHIFT (24)
+#define HSR_CV (1U << HSR_CV_SHIFT)
+#define HSR_COND_SHIFT (20)
+#define HSR_COND (0xfU << HSR_COND_SHIFT)
+
+#define FSC_FAULT (0x04)
+#define FSC_PERM (0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK (~0xf)
+
+#define HSR_EC_UNKNOWN (0x00)
+#define HSR_EC_WFI (0x01)
+#define HSR_EC_CP15_32 (0x03)
+#define HSR_EC_CP15_64 (0x04)
+#define HSR_EC_CP14_MR (0x05)
+#define HSR_EC_CP14_LS (0x06)
+#define HSR_EC_CP_0_13 (0x07)
+#define HSR_EC_CP10_ID (0x08)
+#define HSR_EC_JAZELLE (0x09)
+#define HSR_EC_BXJ (0x0A)
+#define HSR_EC_CP14_64 (0x0C)
+#define HSR_EC_SVC_HYP (0x11)
+#define HSR_EC_HVC (0x12)
+#define HSR_EC_SMC (0x13)
+#define HSR_EC_IABT (0x20)
+#define HSR_EC_IABT_HYP (0x21)
+#define HSR_EC_DABT (0x24)
+#define HSR_EC_DABT_HYP (0x25)
+
+#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
+
+#endif /* __ARM_KVM_ARM_H__ */
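
For illustration only (not part of the patch), the HSR field macros above are typically combined as below when decoding a trap; the helper name is made up for the example.

/* Illustrative sketch only -- pull the exception class out of a saved HSR. */
static inline unsigned int hsr_exception_class(u32 hsr)
{
	return (hsr & HSR_EC) >> HSR_EC_SHIFT;	/* yields one of the HSR_EC_* values */
}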
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..e4956f4e23e1
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+/* 0 is reserved as an invalid value. */
+#define c0_MPIDR 1 /* MultiProcessor ID Register */
+#define c0_CSSELR 2 /* Cache Size Selection Register */
+#define c1_SCTLR 3 /* System Control Register */
+#define c1_ACTLR 4 /* Auxiliary Control Register */
+#define c1_CPACR 5 /* Coprocessor Access Control */
+#define c2_TTBR0 6 /* Translation Table Base Register 0 */
+#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
+#define c2_TTBR1 8 /* Translation Table Base Register 1 */
+#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
+#define c2_TTBCR 10 /* Translation Table Base Control R. */
+#define c3_DACR 11 /* Domain Access Control Register */
+#define c5_DFSR 12 /* Data Fault Status Register */
+#define c5_IFSR 13 /* Instruction Fault Status Register */
+#define c5_ADFSR 14 /* Auxiliary Data Fault Status R */
+#define c5_AIFSR 15 /* Auxiliary Instruction Fault Status R */
+#define c6_DFAR 16 /* Data Fault Address Register */
+#define c6_IFAR 17 /* Instruction Fault Address Register */
+#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */
+#define c10_PRRR 19 /* Primary Region Remap Register */
+#define c10_NMRR 20 /* Normal Memory Remap Register */
+#define c12_VBAR 21 /* Vector Base Address Register */
+#define c13_CID 22 /* Context ID Register */
+#define c13_TID_URW 23 /* Thread ID, User R/W */
+#define c13_TID_URO 24 /* Thread ID, User R/O */
+#define c13_TID_PRIV 25 /* Thread ID, Privileged */
+#define c14_CNTKCTL 26 /* Timer Control Register (PL1) */
+#define NR_CP15_REGS 27 /* Number of regs (incl. invalid) */
+
+#define ARM_EXCEPTION_RESET 0
+#define ARM_EXCEPTION_UNDEFINED 1
+#define ARM_EXCEPTION_SOFTWARE 2
+#define ARM_EXCEPTION_PREF_ABORT 3
+#define ARM_EXCEPTION_DATA_ABORT 4
+#define ARM_EXCEPTION_IRQ 5
+#define ARM_EXCEPTION_FIQ 6
+#define ARM_EXCEPTION_HVC 7
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_exit[];
+extern char __kvm_hyp_exit_end[];
+
+extern char __kvm_hyp_vector[];
+
+extern char __kvm_hyp_code_start[];
+extern char __kvm_hyp_code_end[];
+
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
new file mode 100644
index 000000000000..4917c2f7e459
--- /dev/null
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_COPROC_H__
+#define __ARM_KVM_COPROC_H__
+#include <linux/kvm_host.h>
+
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_coproc_target_table {
+ unsigned target;
+ const struct coproc_reg *table;
+ size_t num;
+};
+void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+void kvm_coproc_table_init(void);
+
+struct kvm_one_reg;
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..fd611996bfb5
--- /dev/null
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_EMULATE_H__
+#define __ARM_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+
+u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+{
+ return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+}
+
+static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+{
+ return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+}
+
+static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
+}
+
+static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
+{
+ unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ return cpsr_mode > USR_MODE;
+}
+
+static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
+{
+ return reg == 15;
+}
+
+#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
new file mode 100644
index 000000000000..d1736a53b12d
--- /dev/null
+++ b/arch/arm/include/asm/kvm_host.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_HOST_H__
+#define __ARM_KVM_HOST_H__
+
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/fpstate.h>
+#include <asm/kvm_arch_timer.h>
+
+#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HAVE_ONE_REG
+
+#define KVM_VCPU_MAX_FEATURES 1
+
+/* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+
+#include <asm/kvm_vgic.h>
+
+struct kvm_vcpu;
+u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
+int kvm_target_cpu(void);
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
+
+struct kvm_arch {
+ /* VTTBR value associated with below pgd and vmid */
+ u64 vttbr;
+
+ /* Timer */
+ struct arch_timer_kvm timer;
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+
+ /* The VMID generation used for the virt. memory system */
+ u64 vmid_gen;
+ u32 vmid;
+
+ /* Stage-2 page table */
+ pgd_t *pgd;
+
+ /* Interrupt controller */
+ struct vgic_dist vgic;
+};
+
+#define KVM_NR_MEM_OBJS 40
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ void *objects[KVM_NR_MEM_OBJS];
+};
+
+struct kvm_vcpu_arch {
+ struct kvm_regs regs;
+
+ int target; /* Processor target */
+ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
+
+ /* System control coprocessor (cp15) */
+ u32 cp15[NR_CP15_REGS];
+
+ /* The CPU type we expose to the VM */
+ u32 midr;
+
+ /* Exception Information */
+ u32 hsr; /* Hyp Syndrome Register */
+ u32 hxfar; /* Hyp Data/Inst Fault Address Register */
+ u32 hpfar; /* Hyp IPA Fault Address Register */
+
+ /* Floating point registers (VFP and Advanced SIMD/NEON) */
+ struct vfp_hard_struct vfp_guest;
+ struct vfp_hard_struct *vfp_host;
+
+ /* VGIC state */
+ struct vgic_cpu vgic_cpu;
+ struct arch_timer_cpu timer_cpu;
+
+ /*
+ * Anything that is not used directly from assembly code goes
+ * here.
+ */
+ /* dcache set/way operation pending */
+ int last_pcpu;
+ cpumask_t require_dcache_flush;
+
+ /* Don't run the guest on this vcpu */
+ bool pause;
+
+ /* IO related fields */
+ struct kvm_decode mmio_decode;
+
+ /* Interrupt related fields */
+ u32 irq_lines; /* IRQ and FIQ levels */
+
+ /* Hyp exception information */
+ u32 hyp_pc; /* PC when exception was taken from Hyp mode */
+
+ /* Cache some mmu pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* Detect first run of a vcpu */
+ bool has_run_once;
+};
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+ u32 halt_wakeup;
+};
+
+struct kvm_vcpu_init;
+int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+ const struct kvm_vcpu_init *init);
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+struct kvm_one_reg;
+int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+u64 kvm_call_hyp(void *hypfn, ...);
+void force_vm_exit(const cpumask_t *mask);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+struct kvm;
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
+
+/* We do not have shadow page tables, hence the empty hooks */
+static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+
+static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+struct kvm_one_reg;
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+
+#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
new file mode 100644
index 000000000000..adcc0d7d3175
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMIO_H__
+#define __ARM_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+struct kvm_decode {
+ unsigned long rt;
+ bool sign_extend;
+};
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+ phys_addr_t phys_addr;
+ u8 data[8];
+ u32 len;
+ bool is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ run->mmio.phys_addr = mmio->phys_addr;
+ run->mmio.len = mmio->len;
+ run->mmio.is_write = mmio->is_write;
+ memcpy(run->mmio.data, mmio->data, mmio->len);
+ run->exit_reason = KVM_EXIT_MMIO;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ phys_addr_t fault_ipa);
+
+#endif /* __ARM_KVM_MMIO_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..421a20b34874
--- /dev/null
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMU_H__
+#define __ARM_KVM_MMU_H__
+
+int create_hyp_mappings(void *from, void *to);
+int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
+void free_hyp_pmds(void);
+
+int kvm_alloc_stage2_pgd(struct kvm *kvm);
+void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+ phys_addr_t pa, unsigned long size);
+
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
+
+phys_addr_t kvm_mmu_get_httbr(void);
+int kvm_mmu_init(void);
+void kvm_clear_hyp_idmap(void);
+
+static inline bool kvm_is_write_fault(unsigned long hsr)
+{
+ unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
+ if (hsr_ec == HSR_EC_IABT)
+ return false;
+ else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
+ return false;
+ else
+ return true;
+}
+
+#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
new file mode 100644
index 000000000000..9a83d98bf170
--- /dev/null
+++ b/arch/arm/include/asm/kvm_psci.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_PSCI_H__
+#define __ARM_KVM_PSCI_H__
+
+bool kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
new file mode 100644
index 000000000000..ab97207d9cd3
--- /dev/null
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_KVM_VGIC_H
+#define __ASM_ARM_KVM_VGIC_H
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/irqchip/arm-gic.h>
+
+#define VGIC_NR_IRQS 128
+#define VGIC_NR_SGIS 16
+#define VGIC_NR_PPIS 16
+#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
+#define VGIC_MAX_CPUS KVM_MAX_VCPUS
+#define VGIC_MAX_LRS (1 << 6)
+
+/* Sanity checks... */
+#if (VGIC_MAX_CPUS > 8)
+#error Invalid number of CPU interfaces
+#endif
+
+#if (VGIC_NR_IRQS & 31)
+#error "VGIC_NR_IRQS must be a multiple of 32"
+#endif
+
+#if (VGIC_NR_IRQS > 1024)
+#error "VGIC_NR_IRQS must be <= 1024"
+#endif
+
+/*
+ * The GIC distributor registers describing interrupts have two parts:
+ * - 32 per-CPU interrupts (SGI + PPI)
+ * - a bunch of shared interrupts (SPI)
+ */
+struct vgic_bitmap {
+ union {
+ u32 reg[VGIC_NR_PRIVATE_IRQS / 32];
+ DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS);
+ } percpu[VGIC_MAX_CPUS];
+ union {
+ u32 reg[VGIC_NR_SHARED_IRQS / 32];
+ DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS);
+ } shared;
+};
+
+struct vgic_bytemap {
+ u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4];
+ u32 shared[VGIC_NR_SHARED_IRQS / 4];
+};
+
+struct vgic_dist {
+#ifdef CONFIG_KVM_ARM_VGIC
+ spinlock_t lock;
+ bool ready;
+
+ /* Virtual control interface mapping */
+ void __iomem *vctrl_base;
+
+ /* Distributor and vcpu interface mapping in the guest */
+ phys_addr_t vgic_dist_base;
+ phys_addr_t vgic_cpu_base;
+
+ /* Distributor enabled */
+ u32 enabled;
+
+ /* Interrupt enabled (one bit per IRQ) */
+ struct vgic_bitmap irq_enabled;
+
+ /* Interrupt 'pin' level */
+ struct vgic_bitmap irq_state;
+
+ /* Level-triggered interrupt in progress */
+ struct vgic_bitmap irq_active;
+
+ /* Interrupt priority. Not used yet. */
+ struct vgic_bytemap irq_priority;
+
+ /* Level/edge triggered */
+ struct vgic_bitmap irq_cfg;
+
+ /* Source CPU per SGI and target CPU */
+ u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS];
+
+ /* Target CPU for each IRQ */
+ u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS];
+ struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS];
+
+ /* Bitmap indicating which CPU has something pending */
+ unsigned long irq_pending_on_cpu;
+#endif
+};
+
+struct vgic_cpu {
+#ifdef CONFIG_KVM_ARM_VGIC
+ /* per IRQ to LR mapping */
+ u8 vgic_irq_lr_map[VGIC_NR_IRQS];
+
+ /* Pending interrupts on this VCPU */
+ DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
+ DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS);
+
+ /* Bitmap of used/free list registers */
+ DECLARE_BITMAP( lr_used, VGIC_MAX_LRS);
+
+ /* Number of list registers on this CPU */
+ int nr_lr;
+
+ /* CPU vif control registers for world switch */
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_misr; /* Saved only */
+ u32 vgic_eisr[2]; /* Saved only */
+ u32 vgic_elrsr[2]; /* Saved only */
+ u32 vgic_apr;
+ u32 vgic_lr[VGIC_MAX_LRS];
+#endif
+};
+
+#define LR_EMPTY 0xff
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_run;
+struct kvm_exit_mmio;
+
+#ifdef CONFIG_KVM_ARM_VGIC
+int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
+int kvm_vgic_hyp_init(void);
+int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm);
+int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ bool level);
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio);
+
+#define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base))
+#define vgic_initialized(k) ((k)->arch.vgic.ready)
+
+#else
+static inline int kvm_vgic_hyp_init(void)
+{
+ return 0;
+}
+
+static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
+{
+ return 0;
+}
+
+static inline int kvm_vgic_init(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline int kvm_vgic_create(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
+ unsigned int irq_num, bool level)
+{
+ return 0;
+}
+
+static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_exit_mmio *mmio)
+{
+ return false;
+}
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline bool vgic_initialized(struct kvm *kvm)
+{
+ return true;
+}
+#endif
+
+#endif
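A hedged usage sketch (the surrounding function is invented, not the arch code that consumes this header): the flush/sync pair declared above is intended to bracket each guest entry, with kvm_vgic_vcpu_pending_irq() telling the caller whether the VCPU still has something to deliver.

/* Illustrative sketch only */
static int run_vcpu_sketch(struct kvm_vcpu *vcpu)
{
        /* push pending distributor state into the list registers */
        kvm_vgic_flush_hwstate(vcpu);

        /* ... world switch into the guest and back ... */

        /* fold list-register state back into the distributor */
        kvm_vgic_sync_hwstate(vcpu);

        return kvm_vgic_vcpu_pending_irq(vcpu);
}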
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 917d4fcfd9b4..308ad7d6f98b 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -12,7 +12,6 @@
struct tag;
struct meminfo;
-struct sys_timer;
struct pt_regs;
struct smp_operations;
#ifdef CONFIG_SMP
@@ -48,7 +47,7 @@ struct machine_desc {
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
void (*init_irq)(void);
- struct sys_timer *timer; /* system tick timer */
+ void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
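A hypothetical board descriptor (all myboard_* names invented) showing the new init_time hook taking the place of the removed struct sys_timer pointer:

/* Hypothetical board file, only to show the new hook */
MACHINE_START(MYBOARD, "My board")
        .map_io         = myboard_map_io,
        .init_irq       = myboard_init_irq,
        .init_time      = myboard_timer_init,
        .init_machine   = myboard_init_machine,
MACHINE_END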
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index 15cb035309f7..18c883023339 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -22,6 +22,7 @@ extern int show_fiq_list(struct seq_file *, int);
#ifdef CONFIG_MULTI_IRQ_HANDLER
extern void (*handle_arch_irq)(struct pt_regs *);
+extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
/*
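A minimal sketch (handler name invented) of the new set_handle_irq() helper, which lets an irqchip's init code register the top-level entry point instead of assigning handle_arch_irq directly:

static void __init myboard_init_irq(void)
{
        /* ... map and configure the interrupt controller ... */
        set_handle_irq(myboard_handle_irq); /* hypothetical handler */
}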
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index db9fedb57f2c..5cf2e979b4be 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -23,6 +23,7 @@ struct hw_pci {
#endif
struct pci_ops *ops;
int nr_controllers;
+ void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
void (*preinit)(void);
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 6ca945f534ab..90c12e1e695c 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -10,36 +10,6 @@
#ifndef __ASM_ARM_MACH_TIME_H
#define __ASM_ARM_MACH_TIME_H
-/*
- * This is our kernel timer structure.
- *
- * - init
- * Initialise the kernels jiffy timer source, claim interrupt
- * using setup_irq. This is called early on during initialisation
- * while interrupts are still disabled on the local CPU.
- * - suspend
- * Suspend the kernel jiffy timer source, if necessary. This
- * is called with interrupts disabled, after all normal devices
- * have been suspended. If no action is required, set this to
- * NULL.
- * - resume
- * Resume the kernel jiffy timer source, if necessary. This
- * is called with interrupts disabled before any normal devices
- * are resumed. If no action is required, set this to NULL.
- * - offset
- * Return the timer offset in microseconds since the last timer
- * interrupt. Note: this must take account of any unprocessed
- * timer interrupt which may be pending.
- */
-struct sys_timer {
- void (*init)(void);
- void (*suspend)(void);
- void (*resume)(void);
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
- unsigned long (*offset)(void);
-#endif
-};
-
extern void timer_tick(void);
struct timespec;
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 73cf03aa981e..57870ab313c5 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -36,23 +36,23 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
* The maximum size of a 26-bit user space task.
*/
-#define TASK_SIZE_26 UL(0x04000000)
+#define TASK_SIZE_26 (UL(1) << 26)
/*
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
#ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
#endif
#if TASK_SIZE > MODULES_VADDR
@@ -245,6 +245,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
+#ifdef CONFIG_VIRT_TO_BUS
static inline __deprecated unsigned long virt_to_bus(void *x)
{
return __virt_to_bus((unsigned long)x);
@@ -254,6 +255,7 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
{
return (void *)__bus_to_virt(x);
}
+#endif
/*
* Conversion between a struct page and a physical address.
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3b..e3d55547e755 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
- u64 id;
+ atomic64_t id;
#endif
- unsigned int vmalloc_seq;
+ unsigned int vmalloc_seq;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
+#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
#else
#define ASID(mm) (0)
#endif
@@ -26,7 +26,7 @@ typedef struct {
* modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
*/
typedef struct {
- unsigned long end_brk;
+ unsigned long end_brk;
} mm_context_t;
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc5..863a6611323c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ mm->context.id = 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
#else /* !CONFIG_CPU_HAS_ASID */
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644
index 000000000000..bc3a9174417c
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_OPCODES_SEC_H
+#define __ASM_ARM_OPCODES_SEC_H
+
+#include <asm/opcodes.h>
+
+#define __SMC(imm4) __inst_arm_thumb32( \
+ 0xE1600070 | (((imm4) & 0xF) << 0), \
+ 0xF7F08000 | (((imm4) & 0xF) << 16) \
+)
+
+#endif /* __ASM_ARM_OPCODES_SEC_H */
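For illustration (not part of the patch): in C code __SMC() expands through __inst_arm_thumb32() to a string holding the right encoding for the kernel's instruction set, so it can be placed directly in inline assembly. A minimal sketch, assuming the usual convention of a function id in r0 and the result returned in r0:

/* Sketch only: issue SMC #0 with one argument */
static noinline u32 smc_call_sketch(u32 function_id, u32 arg0)
{
        register u32 r0 asm("r0") = function_id;
        register u32 r1 asm("r1") = arg0;

        asm volatile(__SMC(0)
                     : "+r" (r0)
                     : "r" (r1)
                     : "memory");

        return r0;
}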
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 74e211a6fb24..e796c598513b 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -10,6 +10,7 @@
#define __ASM_ARM_OPCODES_H
#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 53426c66352a..12f71a190422 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
static inline void outer_flush_all(void) { }
static inline void outer_inv_all(void) { }
static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
#endif
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index d7952824c5c4..18f5cef82ad5 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -32,6 +32,9 @@
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
#define PMD_BIT4 (_AT(pmdval_t, 0))
#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT (61)
+#define PMD_APTABLE (_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT)
+#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59)
/*
* - section
@@ -41,9 +44,11 @@
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
+#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a3f37929940a..6ef8afd1b64c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -104,11 +104,29 @@
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
+/*
+ * 2nd stage PTE definitions for LPAE.
+ */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
+
+/*
+ * Hyp-mode PL2 PTE definitions for LPAE.
+ */
+#define L_PTE_HYP L_PTE_USER
+
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & 2))
#define pud_present(pud) (pud_val(pud))
+#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_TABLE)
+#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
+ PMD_TYPE_SECT)
#define pud_clear(pudp) \
do { \
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9c82f988c0e3..80d6fc4dbe4a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
+extern pgprot_t pgprot_hyp_device;
+extern pgprot_t pgprot_s2;
+extern pgprot_t pgprot_s2_device;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
@@ -82,6 +85,10 @@ extern pgprot_t pgprot_kernel;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
+#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
+#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
@@ -240,7 +247,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+ L_PTE_NONE | L_PTE_VALID;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
new file mode 100644
index 000000000000..ce0dbe7c1625
--- /dev/null
+++ b/arch/arm/include/asm/psci.h
@@ -0,0 +1,36 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_PSCI_H
+#define __ASM_ARM_PSCI_H
+
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+extern struct psci_operations psci_ops;
+
+#endif /* __ASM_ARM_PSCI_H */
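A hedged sketch (function name invented) of how an SMP bringup path would consume the exported psci_ops once a firmware backend has populated it:

/* Sketch: bring up a secondary CPU through PSCI if the firmware offers it */
static int boot_secondary_sketch(unsigned int cpu, unsigned long entry_point)
{
        if (psci_ops.cpu_on)
                return psci_ops.cpu_on(cpu, entry_point);

        return -ENODEV;
}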
diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
index 9a0ea6ab988f..c0eb412aff04 100644
--- a/arch/arm/include/asm/signal.h
+++ b/arch/arm/include/asm/signal.h
@@ -16,23 +16,7 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
+#define __ARCH_HAS_SA_RESTORER
#include <asm/sigcontext.h>
#endif
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 4eb6d005ffaa..18d169373612 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -6,9 +6,32 @@
#define SCU_PM_POWEROFF 3
#ifndef __ASSEMBLER__
+
+#include <asm/cputype.h>
+
+static inline bool scu_a9_has_base(void)
+{
+ return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+}
+
+static inline unsigned long scu_a9_get_base(void)
+{
+ unsigned long pa;
+
+ asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (pa));
+
+ return pa;
+}
+
unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
#endif
#endif
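An illustrative sketch (function name invented; the usual linux/io.h and linux/sizes.h helpers assumed) combining the new helpers: read the CP15 CBAR for the SCU base on a Cortex-A9, then enable the SCU before bringing up secondaries.

static void __init smp_prepare_scu_sketch(void)
{
        void __iomem *scu_base;

        if (!scu_a9_has_base())
                return;

        scu_base = ioremap(scu_a9_get_base(), SZ_256);
        if (!scu_base)
                return;

        scu_enable(scu_base);
        iounmap(scu_base);
}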
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a69..6220e9fdf4c7 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- unsigned long tmp;
- u32 slock;
-
smp_mb();
-
- __asm__ __volatile__(
-" mov %1, #1\n"
-"1: ldrex %0, [%2]\n"
-" uadd16 %0, %0, %1\n"
-" strex %1, %0, [%2]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock)
- : "cc");
-
+ lock->tickets.owner++;
dsb_sev();
}
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77eb..4db8c8820f0d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,10 +34,13 @@
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
+#define TLB_V6_BP (1 << 19)
+
/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE (1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE (1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP (1 << 23)
#define TLB_BARRIER (1 << 28)
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
@@ -150,7 +153,8 @@
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
- TLB_V6_I_ASID | TLB_V6_D_ASID)
+ TLB_V6_I_ASID | TLB_V6_D_ASID | \
+ TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags v6wbi_tlb_flags
@@ -166,9 +170,11 @@
#endif
#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+ TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+ TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+ TLB_V6_U_ASID | TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V7
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
}
}
+static inline void local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+ else if (tlb_flag(TLB_V6_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+
+ if (tlb_flag(TLB_BARRIER))
+ isb();
+}
+
/*
* flush_pmd_entry
*
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
#define flush_tlb_kernel_page local_flush_tlb_kernel_page
#define flush_tlb_range local_flush_tlb_range
#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+#define flush_bp_all local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
#endif
/*
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 21a2700d2957..e4ddfb39ca34 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -26,8 +26,6 @@
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_OLD_MMAP
#define __ARCH_WANT_SYS_OLD_SELECT
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 86164df86cb4..50af92bac737 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -24,9 +24,9 @@
/*
* Flag indicating that the kernel was not entered in the same mode on every
* CPU. The zImage loader stashes this value in an SPSR, so we need an
- * architecturally defined flag bit here (the N flag, as it happens)
+ * architecturally defined flag bit here.
*/
-#define BOOT_CPU_MODE_MISMATCH (1<<31)
+#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 94b4e9020b02..8b1f37bfeeec 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -2,6 +2,7 @@
#define _ASM_ARM_XEN_EVENTS_H
#include <asm/ptrace.h>
+#include <asm/atomic.h>
enum ipi_vector {
XEN_PLACEHOLDER_VECTOR,
@@ -15,4 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+ atomic64_t, \
+ counter), (val))
+
#endif /* _ASM_ARM_XEN_EVENTS_H */
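For illustration (wrapper name invented; xen_ulong_t comes from the Xen interface headers): the new xchg_xen_ulong() lets the event-channel loop atomically claim and clear a whole 64-bit word of pending bits on ARM.

/* Sketch only */
static xen_ulong_t claim_pending_word_sketch(xen_ulong_t *word)
{
        return xchg_xen_ulong(word, 0);
}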
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index c6b9096cef95..30cdacb675af 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,6 +1,7 @@
#ifndef _ASM_ARM_XEN_PAGE_H
#define _ASM_ARM_XEN_PAGE_H
+#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -86,4 +87,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
return __set_phys_to_machine(pfn, mfn);
}
+
+#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY)
+
#endif /* _ASM_ARM_XEN_PAGE_H */
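A minimal sketch (function name invented) of the intended use of xen_remap(): map a hypervisor-provided page, such as the shared info page, as normal memory.

/* Sketch only */
static void __iomem *map_xen_page_sketch(phys_addr_t pa)
{
        return xen_remap(pa, PAGE_SIZE);
}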