Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/common/gic.c | 25
-rw-r--r--  arch/arm/include/asm/memory.h | 2
-rw-r--r--  arch/arm/include/asm/smp_scu.h | 8
-rw-r--r--  arch/arm/kernel/smp_scu.c | 2
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 2
-rw-r--r--  arch/arm/mach-highbank/highbank.c | 3
-rw-r--r--  arch/arm/mach-highbank/sysregs.h | 4
-rw-r--r--  arch/arm/mach-realview/include/mach/irqs-eb.h | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/avr32/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/blackfin/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/c6x/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/cris/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/frv/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/m68k/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/m68k/include/asm/processor.h | 1
-rw-r--r--  arch/mips/bcm47xx/Kconfig | 3
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-l2c.c | 9
-rw-r--r--  arch/mips/include/asm/dsp.h | 2
-rw-r--r--  arch/mips/include/asm/inst.h | 1
-rw-r--r--  arch/mips/include/asm/mach-pnx833x/war.h | 2
-rw-r--r--  arch/mips/include/asm/pgtable-64.h | 1
-rw-r--r--  arch/mips/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/mips/include/uapi/asm/break.h (renamed from arch/mips/include/asm/break.h) | 0
-rw-r--r--  arch/mips/kernel/ftrace.c | 36
-rw-r--r--  arch/mips/kernel/mcount.S | 7
-rw-r--r--  arch/mips/kernel/vpe.c | 2
-rw-r--r--  arch/mips/lantiq/irq.c | 2
-rw-r--r--  arch/mips/lib/delay.c | 2
-rw-r--r--  arch/mips/mm/ioremap.c | 6
-rw-r--r--  arch/mips/mm/mmap.c | 6
-rw-r--r--  arch/mips/netlogic/xlr/setup.c | 5
-rw-r--r--  arch/mips/pci/pci-ar71xx.c | 2
-rw-r--r--  arch/mips/pci/pci-ar724x.c | 2
-rw-r--r--  arch/mn10300/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/parisc/include/asm/dma-mapping.h | 15
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 62
-rw-r--r--  arch/tile/Kconfig | 2
-rw-r--r--  arch/tile/include/asm/io.h | 6
-rw-r--r--  arch/tile/include/asm/irqflags.h | 32
-rw-r--r--  arch/tile/include/uapi/arch/interrupts_32.h | 394
-rw-r--r--  arch/tile/include/uapi/arch/interrupts_64.h | 346
-rw-r--r--  arch/tile/kernel/intvec_64.S | 4
-rw-r--r--  arch/tile/kernel/process.c | 2
-rw-r--r--  arch/tile/kernel/reboot.c | 2
-rw-r--r--  arch/tile/kernel/setup.c | 5
-rw-r--r--  arch/tile/kernel/stack.c | 3
-rw-r--r--  arch/tile/lib/cacheflush.c | 2
-rw-r--r--  arch/tile/lib/cpumask.c | 2
-rw-r--r--  arch/tile/lib/exports.c | 2
-rw-r--r--  arch/tile/mm/homecache.c | 1
-rw-r--r--  arch/x86/ia32/ia32entry.S | 4
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 7
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c | 2
-rw-r--r--  arch/x86/tools/insn_sanity.c | 10
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h | 15
57 files changed, 685 insertions, 464 deletions
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 36ae03a3f5d1..87dfa9026c5b 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+ void __iomem *base = gic_data_dist_base(gic);
+ u32 mask, i;
+
+ for (i = mask = 0; i < 32; i += 4) {
+ mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+ mask |= mask >> 16;
+ mask |= mask >> 8;
+ if (mask)
+ break;
+ }
+
+ if (!mask)
+ pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+ return mask;
+}
+
static void __init gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
/*
* Set all global interrupts to this CPU only.
*/
- cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+ cpumask = gic_get_cpumask(gic);
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
for (i = 32; i < gic_irqs; i += 4)
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
* Get what the GIC says our CPU mask is.
*/
BUG_ON(cpu >= NR_GIC_CPU_IF);
- cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+ cpu_mask = gic_get_cpumask(gic);
gic_cpu_map[cpu] = cpu_mask;
/*
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 73cf03aa981e..1c4df27f9332 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -37,7 +37,7 @@
*/
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
* The maximum size of a 26-bit user space task.
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 4eb6d005ffaa..86dff32a0737 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -7,8 +7,14 @@
#ifndef __ASSEMBLER__
unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
#endif
#endif
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index b9f015e843d8..45eac87ed66a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
unsigned int val;
- int cpu = cpu_logical_map(smp_processor_id());
+ int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e103c290bc9e..85afb031b676 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
select CPU_EXYNOS4210
select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
select PINCTRL
- select PINCTRL_EXYNOS4
+ select PINCTRL_EXYNOS
select USE_OF
help
Machine support for Samsung Exynos4 machine with device tree enabled.
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 981dc1e1da51..e6c061282939 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,6 +28,7 @@
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
+#include <asm/cputype.h>
#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
#include <asm/hardware/arm_timer.h>
@@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void)
void highbank_set_cpu_jump(int cpu, void *jump_addr)
{
- cpu = cpu_logical_map(cpu);
+ cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
__cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h
index 70af9d13fcef..5995df7f2622 100644
--- a/arch/arm/mach-highbank/sysregs.h
+++ b/arch/arm/mach-highbank/sysregs.h
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
static inline void highbank_set_core_pwr(void)
{
- int cpu = cpu_logical_map(smp_processor_id());
+ int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
if (scu_base_addr)
scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
static inline void highbank_clear_core_pwr(void)
{
- int cpu = cpu_logical_map(smp_processor_id());
+ int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
if (scu_base_addr)
scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
else
diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h
index d6b5073692d2..44754230fdcc 100644
--- a/arch/arm/mach-realview/include/mach/irqs-eb.h
+++ b/arch/arm/mach-realview/include/mach/irqs-eb.h
@@ -115,7 +115,7 @@
/*
* Only define NR_IRQS if less than NR_IRQS_EB
*/
-#define NR_IRQS_EB (IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB (IRQ_EB_GIC_START + 128)
#if defined(CONFIG_MACH_REALVIEW_EB) \
&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 076c26d43864..dda3904dc64c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (is_coherent || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
- else if (gfp & GFP_ATOMIC)
+ else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index aaf5199d8fcb..b3d18f9f3e8d 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
#endif /* __ASM_AVR32_DMA_MAPPING_H */
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index bbf461076a0a..054d9ec57d9d 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
_dma_sync((dma_addr_t)vaddr, size, dir);
}
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
#endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 3c694065030f..88bd0d899bdb 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 8588b2ccf854..2f0f654f1b44 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
#endif
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index dfb811002c64..1746a2b8e6e7 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
flush_write_buffers();
}
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 3e6b8445af6a..292805f0762e 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
#include <asm-generic/dma-mapping-broken.h>
#endif
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index ae700f49e51d..b0768a657920 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
#define start_thread(_regs, _pc, _usp) \
do { \
(_regs)->pc = (_pc); \
- ((struct switch_stack *)(_regs))[-1].a6 = 0; \
setframeformat(_regs); \
if (current->mm) \
(_regs)->d5 = current->mm->start_data; \
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index d7af29f1fcf0..ba611927749b 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -8,8 +8,10 @@ config BCM47XX_SSB
select SSB_DRIVER_EXTIF
select SSB_EMBEDDED
select SSB_B43_PCI_BRIDGE if PCI
+ select SSB_DRIVER_PCICORE if PCI
select SSB_PCICORE_HOSTMODE if PCI
select SSB_DRIVER_GPIO
+ select GPIOLIB
default y
help
Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -25,6 +27,7 @@ config BCM47XX_BCMA
select BCMA_HOST_PCI if PCI
select BCMA_DRIVER_PCI_HOSTMODE if PCI
select BCMA_DRIVER_GPIO
+ select GPIOLIB
default y
help
Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 9f883bf76953..33b72144db31 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -30,6 +30,7 @@
* measurement, and debugging facilities.
*/
+#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
@@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
*/
static void fault_in(uint64_t addr, int len)
{
- volatile char *ptr;
- volatile char dummy;
+ char *ptr;
+
/*
* Adjust addr and length so we get all cache lines even for
* small ranges spanning two cache lines.
*/
len += addr & CVMX_CACHE_LINE_MASK;
addr &= ~CVMX_CACHE_LINE_MASK;
- ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+ ptr = cvmx_phys_to_ptr(addr);
/*
* Invalidate L1 cache to make sure all loads result in data
* being in L2.
*/
CVMX_DCACHE_INVALIDATE;
while (len > 0) {
- dummy += *ptr;
+ ACCESS_ONCE(*ptr);
len -= CVMX_CACHE_LINE_SIZE;
ptr += CVMX_CACHE_LINE_SIZE;
}
diff --git a/arch/mips/include/asm/dsp.h b/arch/mips/include/asm/dsp.h
index e9bfc0813c72..7bfad0520e25 100644
--- a/arch/mips/include/asm/dsp.h
+++ b/arch/mips/include/asm/dsp.h
@@ -16,7 +16,7 @@
#include <asm/mipsregs.h>
#define DSP_DEFAULT 0x00000000
-#define DSP_MASK 0x3ff
+#define DSP_MASK 0x3f
#define __enable_dsp_hazard() \
do { \
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index ab84064283db..33c34adbecfa 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -353,6 +353,7 @@ union mips_instruction {
struct u_format u_format;
struct c_format c_format;
struct r_format r_format;
+ struct p_format p_format;
struct f_format f_format;
struct ma_format ma_format;
struct b_format b_format;
diff --git a/arch/mips/include/asm/mach-pnx833x/war.h b/arch/mips/include/asm/mach-pnx833x/war.h
index edaa06d9d492..e410df4e1b3a 100644
--- a/arch/mips/include/asm/mach-pnx833x/war.h
+++ b/arch/mips/include/asm/mach-pnx833x/war.h
@@ -21,4 +21,4 @@
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
+#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index c63191055e69..013d5f781263 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#define __pgd_offset(address) pgd_index(address)
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index a1a0452ac185..77d4fb33f75a 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
header-y += auxvec.h
header-y += bitsperlong.h
+header-y += break.h
header-y += byteorder.h
header-y += cachectl.h
header-y += errno.h
diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 9161e684cb4c..9161e684cb4c 100644
--- a/arch/mips/include/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 6a2d758dd8e9..83fa1460e294 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -25,6 +25,12 @@
#define MCOUNT_OFFSET_INSNS 4
#endif
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+ ftrace_modify_all_code(command);
+}
+
/*
* Check if the address is in kernel space
*
@@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
return 0;
}
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+ flush_icache_range(ip, ip + 8); /* original ip + 12 */
+ return 0;
+}
+#endif
+
/*
* The details about the calling site of mcount on MIPS
*
@@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod,
* needed.
*/
new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
-
+#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
+#else
+ /*
+ * On 32 bit MIPS platforms, gcc adds a stack adjust
+ * instruction in the delay slot after the branch to
+ * mcount and expects mcount to restore the sp on return.
+ * This is based on a legacy API and does nothing but
+ * waste instructions so it's being removed at runtime.
+ */
+ return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 4c968e7efb74..165867673357 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -46,9 +46,8 @@
PTR_L a5, PT_R9(sp)
PTR_L a6, PT_R10(sp)
PTR_L a7, PT_R11(sp)
- PTR_ADDIU sp, PT_SIZE
#else
- PTR_ADDIU sp, (PT_SIZE + 8)
+ PTR_ADDIU sp, PT_SIZE
#endif
.endm
@@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)
.globl _mcount
_mcount:
b ftrace_stub
- nop
+ addiu sp,sp,8
+
+ /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
lw t1, function_trace_stop
bnez t1, ftrace_stub
nop
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index eec690af6581..147cec19621d 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v)
printk(KERN_WARNING
"VPE loader: TC %d is already in use.\n",
- t->index);
+ v->tc->index);
return -ENOEXEC;
}
} else {
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f36acd1b3808..a7935bf0fecb 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
#endif
/* tell oprofile which irq to use */
- cp0_perfcount_irq = LTQ_PERF_IRQ;
+ cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
/*
* if the timer irq is not one of the mips irqs we need to
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index dc81ca8dc0dd..288f7954988d 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -21,7 +21,7 @@ void __delay(unsigned long loops)
" .set noreorder \n"
" .align 3 \n"
"1: bnez %0, 1b \n"
-#if __SIZEOF_LONG__ == 4
+#if BITS_PER_LONG == 32
" subu %0, 1 \n"
#else
" dsubu %0, 1 \n"
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 7657fd21cd3f..cacfd31e8ec9 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr)
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
-
-int __virt_addr_valid(const volatile void *kaddr)
-{
- return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
-}
-EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index d9be7540a6be..7e5fe2790d8a 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
return ret;
}
+
+int __virt_addr_valid(const volatile void *kaddr)
+{
+ return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 4e7f49d3d5a8..c5ce6992ac4c 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -193,8 +193,11 @@ static void nlm_init_node(void)
void __init prom_init(void)
{
- int i, *argv, *envp; /* passed as 32 bit ptrs */
+ int *argv, *envp; /* passed as 32 bit ptrs */
struct psb_info *prom_infop;
+#ifdef CONFIG_SMP
+ int i;
+#endif
/* truncate to 32 bit and sign extend all args */
argv = (int *)(long)(int)fw_arg1;
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 1552522b8718..6eaa4f2d0e38 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -24,7 +24,7 @@
#include <asm/mach-ath79/pci.h>
#define AR71XX_PCI_MEM_BASE 0x10000000
-#define AR71XX_PCI_MEM_SIZE 0x08000000
+#define AR71XX_PCI_MEM_SIZE 0x07000000
#define AR71XX_PCI_WIN0_OFFS 0x10000000
#define AR71XX_PCI_WIN1_OFFS 0x11000000
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 86d77a666458..c11c75be2d7e 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -21,7 +21,7 @@
#define AR724X_PCI_CTRL_SIZE 0x100
#define AR724X_PCI_MEM_BASE 0x10000000
-#define AR724X_PCI_MEM_SIZE 0x08000000
+#define AR724X_PCI_MEM_SIZE 0x04000000
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index c1be4397b1ed..a18abfc558eb 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
mn10300_dcache_flush_inv();
}
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 467bbd510eac..106b395688e1 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
/* At the moment, we panic on error for IOMMU resource exaustion */
#define dma_mapping_error(dev, x) 0
+/* This API cannot be supported on PA-RISC */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 56585086413a..7443481a315c 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
sldi r29,r5,SID_SHIFT - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
-
- /* Calculate hash value for primary slot and store it in r28 */
- rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
- rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
- xor r28,r5,r0
+ /*
+ * Calculate hash value for primary slot and store it in r28
+ * r3 = va, r5 = vsid
+ * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+ */
+ rldicl r0,r3,64-12,48
+ xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
+ * r3 = va, r5 = vsid
*/
- rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
- clrldi r5,r5,40 /* vsid & 0xffffff */
- rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
- xor r28,r28,r5
+ sldi r28,r5,25 /* vsid << 25 */
+ /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+ rldicl r0,r3,64-12,36
+ xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
*/
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
-
- /* Calculate hash value for primary slot and store it in r28 */
- rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
- rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
- xor r28,r5,r0
+ /*
+ * Calculate hash value for primary slot and store it in r28
+ * r3 = va, r5 = vsid
+ * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+ */
+ rldicl r0,r3,64-12,48
+ xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/*
* Calculate hash value for primary slot and
* store it in r28 for 1T segment
+ * r3 = va, r5 = vsid
*/
- rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
- clrldi r5,r5,40 /* vsid & 0xffffff */
- rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
- xor r28,r28,r5
+ sldi r28,r5,25 /* vsid << 25 */
+ /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+ rldicl r0,r3,64-12,36
+ xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
or r29,r28,r29
- /* Calculate hash value for primary slot and store it in r28 */
- rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
- rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */
- xor r28,r5,r0
+ /* Calculate hash value for primary slot and store it in r28
+ * r3 = va, r5 = vsid
+ * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
+ */
+ rldicl r0,r3,64-16,52
+ xor r28,r5,r0 /* hash */
b 4f
3: /* Calc vpn and put it in r29 */
sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
or r29,r28,r29
-
/*
* calculate hash value for primary slot and
* store it in r28 for 1T segment
+ * r3 = va, r5 = vsid
*/
- rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
- clrldi r5,r5,40 /* vsid & 0xffffff */
- rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
- xor r28,r28,r5
+ sldi r28,r5,25 /* vsid << 25 */
+ /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
+ rldicl r0,r3,64-16,40
+ xor r28,r28,r5 /* vsid ^ ( vsid << 25) */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 875d008828b8..1bb7ad4aeff4 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
menu "Tilera-specific configuration"
config NR_CPUS
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 2a9b293fece6..31672918064c 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
#define iowrite32 writel
#define iowrite64 writeq
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+
+static inline void memset_io(volatile void *dst, int val, size_t len)
{
int x;
BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
writel(*(u32 *)(src + x), dst + x);
}
+#endif
+
/*
* The Tile architecture does not support IOPORT, even with PCI.
* Unfortunately we can't yet simply not declare these methods,
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index b4e96fef2cf8..241c0bb60b12 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -18,32 +18,20 @@
#include <arch/interrupts.h>
#include <arch/chip.h>
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
-
/*
* The set of interrupts we want to allow when interrupts are nominally
* disabled. The remainder are effectively "NMI" interrupts from
* the point of view of the generic Linux code. Note that synchronous
* interrupts (aka "non-queued") are not blocked by the mask in any case.
*/
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-
-#else
-
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
- (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
#define LINUX_MASKABLE_INTERRUPTS \
- (~(INT_MASK(INT_PERF_COUNT)))
-#endif
+ (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+ (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
#endif
#ifndef __ASSEMBLY__
@@ -126,7 +114,7 @@
* to know our current state.
*/
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
/* Disable interrupts. */
#define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
- (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+ (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
- (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+ (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
{ \
- movei tmp0, -1; \
+ movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
}; \
{ \
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h
index 96b5710505b6..2efe3f68b2d6 100644
--- a/arch/tile/include/uapi/arch/interrupts_32.h
+++ b/arch/tile/include/uapi/arch/interrupts_32.h
@@ -15,6 +15,7 @@
#ifndef __ARCH_INTERRUPTS_H__
#define __ARCH_INTERRUPTS_H__
+#ifndef __KERNEL__
/** Mask for an interrupt. */
/* Note: must handle breaking interrupts into high and low words manually. */
#define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
#ifndef __ASSEMBLER__
#define INT_MASK(intno) (1ULL << (intno))
#endif
+#endif
/** Where a given interrupt executes */
@@ -92,216 +94,216 @@
#ifndef __ASSEMBLER__
#define QUEUED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#define NONQUEUED_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define CRITICAL_MASKED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#define CRITICAL_UNMASKED_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define MASKABLE_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#define UNMASKABLE_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define SYNC_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_SN_ACCESS) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_IDN_REFILL) | \
- INT_MASK(INT_UDN_REFILL) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_SN_STATIC_ACCESS) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_SN_ACCESS) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_IDN_REFILL) | \
+ (1ULL << INT_UDN_REFILL) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_SN_STATIC_ACCESS) | \
0)
#define NON_SYNC_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_DMATLB_MISS) | \
- INT_MASK(INT_DMATLB_ACCESS) | \
- INT_MASK(INT_SNITLB_MISS) | \
- INT_MASK(INT_SN_NOTIFY) | \
- INT_MASK(INT_SN_FIREWALL) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_DMA_NOTIFY) | \
- INT_MASK(INT_IDN_CA) | \
- INT_MASK(INT_UDN_CA) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DMA_ASID) | \
- INT_MASK(INT_SNI_ASID) | \
- INT_MASK(INT_DMA_CPL) | \
- INT_MASK(INT_SN_CPL) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_DMATLB_MISS) | \
+ (1ULL << INT_DMATLB_ACCESS) | \
+ (1ULL << INT_SNITLB_MISS) | \
+ (1ULL << INT_SN_NOTIFY) | \
+ (1ULL << INT_SN_FIREWALL) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_DMA_NOTIFY) | \
+ (1ULL << INT_IDN_CA) | \
+ (1ULL << INT_UDN_CA) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DMA_ASID) | \
+ (1ULL << INT_SNI_ASID) | \
+ (1ULL << INT_DMA_CPL) | \
+ (1ULL << INT_SN_CPL) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
0)
#endif /* !__ASSEMBLER__ */
#endif /* !__ARCH_INTERRUPTS_H__ */
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h
index 5bb58b2e4e6f..13c9f9182348 100644
--- a/arch/tile/include/uapi/arch/interrupts_64.h
+++ b/arch/tile/include/uapi/arch/interrupts_64.h
@@ -15,6 +15,7 @@
#ifndef __ARCH_INTERRUPTS_H__
#define __ARCH_INTERRUPTS_H__
+#ifndef __KERNEL__
/** Mask for an interrupt. */
#ifdef __ASSEMBLER__
/* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
#else
#define INT_MASK(intno) (1ULL << (intno))
#endif
+#endif
/** Where a given interrupt executes */
@@ -85,192 +87,192 @@
#ifndef __ASSEMBLER__
#define QUEUED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#define NONQUEUED_INTERRUPTS ( \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
0)
#define CRITICAL_MASKED_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
0)
#define CRITICAL_UNMASKED_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#define MASKABLE_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
0)
#define UNMASKABLE_INTERRUPTS ( \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#define SYNC_INTERRUPTS ( \
- INT_MASK(INT_SINGLE_STEP_3) | \
- INT_MASK(INT_SINGLE_STEP_2) | \
- INT_MASK(INT_SINGLE_STEP_1) | \
- INT_MASK(INT_SINGLE_STEP_0) | \
- INT_MASK(INT_IDN_COMPLETE) | \
- INT_MASK(INT_UDN_COMPLETE) | \
- INT_MASK(INT_ITLB_MISS) | \
- INT_MASK(INT_ILL) | \
- INT_MASK(INT_GPV) | \
- INT_MASK(INT_IDN_ACCESS) | \
- INT_MASK(INT_UDN_ACCESS) | \
- INT_MASK(INT_SWINT_3) | \
- INT_MASK(INT_SWINT_2) | \
- INT_MASK(INT_SWINT_1) | \
- INT_MASK(INT_SWINT_0) | \
- INT_MASK(INT_ILL_TRANS) | \
- INT_MASK(INT_UNALIGN_DATA) | \
- INT_MASK(INT_DTLB_MISS) | \
- INT_MASK(INT_DTLB_ACCESS) | \
+ (1ULL << INT_SINGLE_STEP_3) | \
+ (1ULL << INT_SINGLE_STEP_2) | \
+ (1ULL << INT_SINGLE_STEP_1) | \
+ (1ULL << INT_SINGLE_STEP_0) | \
+ (1ULL << INT_IDN_COMPLETE) | \
+ (1ULL << INT_UDN_COMPLETE) | \
+ (1ULL << INT_ITLB_MISS) | \
+ (1ULL << INT_ILL) | \
+ (1ULL << INT_GPV) | \
+ (1ULL << INT_IDN_ACCESS) | \
+ (1ULL << INT_UDN_ACCESS) | \
+ (1ULL << INT_SWINT_3) | \
+ (1ULL << INT_SWINT_2) | \
+ (1ULL << INT_SWINT_1) | \
+ (1ULL << INT_SWINT_0) | \
+ (1ULL << INT_ILL_TRANS) | \
+ (1ULL << INT_UNALIGN_DATA) | \
+ (1ULL << INT_DTLB_MISS) | \
+ (1ULL << INT_DTLB_ACCESS) | \
0)
#define NON_SYNC_INTERRUPTS ( \
- INT_MASK(INT_MEM_ERROR) | \
- INT_MASK(INT_IDN_FIREWALL) | \
- INT_MASK(INT_UDN_FIREWALL) | \
- INT_MASK(INT_TILE_TIMER) | \
- INT_MASK(INT_AUX_TILE_TIMER) | \
- INT_MASK(INT_IDN_TIMER) | \
- INT_MASK(INT_UDN_TIMER) | \
- INT_MASK(INT_IDN_AVAIL) | \
- INT_MASK(INT_UDN_AVAIL) | \
- INT_MASK(INT_IPI_3) | \
- INT_MASK(INT_IPI_2) | \
- INT_MASK(INT_IPI_1) | \
- INT_MASK(INT_IPI_0) | \
- INT_MASK(INT_PERF_COUNT) | \
- INT_MASK(INT_AUX_PERF_COUNT) | \
- INT_MASK(INT_INTCTRL_3) | \
- INT_MASK(INT_INTCTRL_2) | \
- INT_MASK(INT_INTCTRL_1) | \
- INT_MASK(INT_INTCTRL_0) | \
- INT_MASK(INT_BOOT_ACCESS) | \
- INT_MASK(INT_WORLD_ACCESS) | \
- INT_MASK(INT_I_ASID) | \
- INT_MASK(INT_D_ASID) | \
- INT_MASK(INT_DOUBLE_FAULT) | \
+ (1ULL << INT_MEM_ERROR) | \
+ (1ULL << INT_IDN_FIREWALL) | \
+ (1ULL << INT_UDN_FIREWALL) | \
+ (1ULL << INT_TILE_TIMER) | \
+ (1ULL << INT_AUX_TILE_TIMER) | \
+ (1ULL << INT_IDN_TIMER) | \
+ (1ULL << INT_UDN_TIMER) | \
+ (1ULL << INT_IDN_AVAIL) | \
+ (1ULL << INT_UDN_AVAIL) | \
+ (1ULL << INT_IPI_3) | \
+ (1ULL << INT_IPI_2) | \
+ (1ULL << INT_IPI_1) | \
+ (1ULL << INT_IPI_0) | \
+ (1ULL << INT_PERF_COUNT) | \
+ (1ULL << INT_AUX_PERF_COUNT) | \
+ (1ULL << INT_INTCTRL_3) | \
+ (1ULL << INT_INTCTRL_2) | \
+ (1ULL << INT_INTCTRL_1) | \
+ (1ULL << INT_INTCTRL_0) | \
+ (1ULL << INT_BOOT_ACCESS) | \
+ (1ULL << INT_WORLD_ACCESS) | \
+ (1ULL << INT_I_ASID) | \
+ (1ULL << INT_D_ASID) | \
+ (1ULL << INT_DOUBLE_FAULT) | \
0)
#endif /* !__ASSEMBLER__ */
#endif /* !__ARCH_INTERRUPTS_H__ */
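
The masks above are spelled out with an explicit 1ULL literal: more than 32 distinct INT_* vectors appear in these lists, so some shift counts exceed 31, and shifting a plain int by 32 or more is undefined in C; only a 64-bit literal reliably produces a bit in the upper half of the word. A minimal user-space sketch of the same idiom, assuming made-up vector numbers rather than the real <arch/interrupts.h> values:

	/*
	 * Illustrative only: HYP_INT_* are hypothetical vector numbers,
	 * not the real <arch/interrupts.h> values.
	 */
	#include <assert.h>
	#include <stdint.h>

	#define HYP_INT_MEM_ERROR     0
	#define HYP_INT_DOUBLE_FAULT 49	/* any vector >= 32 makes the point */

	int main(void)
	{
		uint64_t mask = (1ULL << HYP_INT_MEM_ERROR) |
		                (1ULL << HYP_INT_DOUBLE_FAULT);

		/* (1 << 49) with an int-typed 1 would be undefined behaviour;
		 * the ULL suffix keeps the shift inside a 64-bit type. */
		assert(mask & (1ULL << HYP_INT_DOUBLE_FAULT));
		return 0;
	}
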
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 54bc9a6678e8..4ea080902654 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1035,7 +1035,9 @@ handle_syscall:
/* Ensure that the syscall number is within the legal range. */
{
moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
blbs r30, .Lcompat_syscall
+#endif
}
{
cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
j .Lresume_userspace /* jump into middle of interrupt_return */
}
+#ifdef CONFIG_COMPAT
.Lcompat_syscall:
/*
* Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
{ move r15, r4; addxi r4, r4, 0 }
{ move r16, r5; addxi r5, r5, 0 }
j .Lload_syscall_pointer
+#endif
.Linvalid_syscall:
/* Report an invalid syscall back to the user program */
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 0e5661e7d00d..caf93ae11793 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p)
{
- struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+ struct pt_regs *childregs = task_pt_regs(p);
unsigned long ksp;
unsigned long *callee_regs;
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index baa3d905fee2..d1b5c913ae72 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -16,6 +16,7 @@
#include <linux/reboot.h>
#include <linux/smp.h>
#include <linux/pm.h>
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
/* No interesting distinction to be made here. */
void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6a649a4462d3..d1e15f7b59c6 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -31,6 +31,7 @@
#include <linux/timex.h>
#include <linux/hugetlb.h>
#include <linux/start_kernel.h>
+#include <linux/screen_info.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
/* Chip information */
char chip_model[64] __write_once;
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index b2f44c28dda6..ed258b8ae320 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
p->pc, p->sp, p->ex1);
p = NULL;
}
- if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+ if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
return p;
return NULL;
}
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(NULL, trace);
}
+EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
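
The EXPORT_SYMBOL_GPL added above is what lets loadable modules call save_stack_trace(). A sketch of a module-side caller, assuming the kernel-3.8-era <linux/stacktrace.h> interface (the module name, trace depth, and pr_info output are illustrative, not taken from this patch):

	#include <linux/module.h>
	#include <linux/stacktrace.h>

	static unsigned long demo_entries[16];	/* arbitrary depth for the example */

	static int __init demo_init(void)
	{
		struct stack_trace trace = {
			.entries	= demo_entries,
			.max_entries	= ARRAY_SIZE(demo_entries),
			.skip		= 0,
		};

		save_stack_trace(&trace);	/* reachable from modules once exported */
		pr_info("demo: captured %u stack entries\n", trace.nr_entries);
		return 0;
	}

	static void __exit demo_exit(void)
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");			/* the export is GPL-only */
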
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index db4fb89e12d8..8f8ad814b139 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -12,6 +12,7 @@
* more details.
*/
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
__insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
#endif
}
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c
index fdc403614d12..75947edccb26 100644
--- a/arch/tile/lib/cpumask.c
+++ b/arch/tile/lib/cpumask.c
@@ -16,6 +16,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/smp.h>
+#include <linux/export.h>
/*
* Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
} while (*bp != '\0' && *bp != '\n');
return 0;
}
+EXPORT_SYMBOL(bitmap_parselist_crop);
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index dd5f0a33fdaf..4385cb6fa00a 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
EXPORT_SYMBOL(hv_dev_close);
EXPORT_SYMBOL(hv_sysconf);
EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
/* libgcc.a */
uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 5f7868dcd6d4..1ae911939a18 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
__set_pte(ptep, pte_set_home(pteval, home));
}
}
+EXPORT_SYMBOL(homecache_change_page_home);
struct page *homecache_alloc_pages(gfp_t gfp_mask,
unsigned int order, int home)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 102ff7cb3e41..142c4ceff112 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -207,7 +207,7 @@ sysexit_from_sys_call:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
- sti
+ ENABLE_INTERRUPTS(CLBR_NONE)
movl %eax,%esi /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */
jbe 1f
@@ -217,7 +217,7 @@ sysexit_from_sys_call:
call __audit_syscall_exit
movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
- cli
+ DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jz \exit
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index fe9edec6698a..84c1309c4c0c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -298,8 +298,7 @@ struct _cache_attr {
unsigned int);
};
-#ifdef CONFIG_AMD_NB
-
+#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
* L3 cache descriptors
*/
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
static struct _cache_attr subcaches =
__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
-#else /* CONFIG_AMD_NB */
+#else
#define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB */
+#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 93b9e1181f83..4914e94ad6e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
break;
case 28: /* Atom */
- case 54: /* Cedariew */
+ case 38: /* Lincroft */
+ case 39: /* Penwell */
+ case 53: /* Cloverview */
+ case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
case 58: /* IvyBridge */
+ case 62: /* IvyBridge EP */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index f2af39f5dc3d..4820c232a0b9 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
};
-static __initconst u64 p6_hw_cache_event_ids
+static u64 p6_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index cc2f8c131286..872eb60e7806 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */
static void usage(const char *err)
{
if (err)
- fprintf(stderr, "Error: %s\n\n", err);
+ fprintf(stderr, "%s: Error: %s\n\n", prog, err);
fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
fprintf(stderr, "\t-y 64bit mode\n");
fprintf(stderr, "\t-n 32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
insns++;
}
- fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+ fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+ prog,
+ (errors) ? "Failure" : "Success",
+ insns,
+ (input_file) ? "given" : "random",
+ errors,
+ seed);
return errors ? 1 : 0;
}
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 4acb5feba1fb..172a02a6ad14 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
consistent_sync(vaddr, size, direction);
}
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ return -EINVAL;
+}
+
#endif /* _XTENSA_DMA_MAPPING_H */
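
These xtensa stubs report that coherent DMA memory cannot yet be mapped into user space, so any driver mmap hook built on them simply fails. A hypothetical sketch of such a caller (the demo_buf structure and demo_mmap handler are assumptions for illustration, not code from this patch):

	#include <linux/dma-mapping.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Hypothetical per-open driver state. */
	struct demo_buf {
		struct device	*dev;
		void		*cpu_addr;	/* from dma_alloc_coherent() */
		dma_addr_t	dma_addr;
		size_t		size;
	};

	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct demo_buf *buf = file->private_data;

		/*
		 * On xtensa this returns -EINVAL (see the stub above), so user
		 * space has to fall back to copy-based read()/write() access.
		 */
		return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
					 buf->dma_addr, buf->size);
	}
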