Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                                 |  3
-rw-r--r--  arch/arm/kernel/module.c                         |  4
-rw-r--r--  arch/arm/mach-s3c64xx/mach-crag6410.c            | 18
-rw-r--r--  arch/arm/mm/alignment.c                          | 56
-rw-r--r--  arch/arm/mm/init.c                               |  2
-rw-r--r--  arch/arm/mm/proc-arm946.S                        |  3
-rw-r--r--  arch/powerpc/include/asm/jump_label.h            |  2
-rw-r--r--  arch/powerpc/include/asm/kdump.h                 | 10
-rw-r--r--  arch/powerpc/include/asm/reg.h                   |  8
-rw-r--r--  arch/powerpc/kernel/cputable.c                   | 11
-rw-r--r--  arch/powerpc/kernel/iomap.c                      |  2
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c              | 10
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c             | 20
-rw-r--r--  arch/powerpc/kernel/prom_init.c                  | 14
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S          |  4
-rw-r--r--  arch/powerpc/platforms/Kconfig                   |  2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c             |  2
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c     |  4
-rw-r--r--  arch/powerpc/platforms/pseries/io_event_irq.c    |  8
-rw-r--r--  arch/powerpc/platforms/pseries/kexec.c           | 34
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c            | 19
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h  | 12
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c           |  5
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c                 |  5
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c           |  1
25 files changed, 156 insertions(+), 103 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2c71a8f3535a..5ebc5d922ea1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -195,8 +195,7 @@ config VECTORS_BASE
The base address of exception vectors.
config ARM_PATCH_PHYS_VIRT
- bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Patch physical to virtual translations at runtime"
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 05b377616fd5..cc2020c2c709 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -323,7 +323,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
#endif
s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
if (s && !is_smp())
+#ifdef CONFIG_SMP_ON_UP
fixup_smp((void *)s->sh_addr, s->sh_size);
+#else
+ return -EINVAL;
+#endif
return 0;
}
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index 9026249233ad..af0c2fe1ea37 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -65,7 +65,7 @@
#include <plat/iic.h>
#include <plat/pm.h>
-#include <sound/wm8915.h>
+#include <sound/wm8996.h>
#include <sound/wm8962.h>
#include <sound/wm9081.h>
@@ -614,7 +614,7 @@ static struct wm831x_pdata glenfarclas_pmic_pdata __initdata = {
.disable_touch = true,
};
-static struct wm8915_retune_mobile_config wm8915_retune[] = {
+static struct wm8996_retune_mobile_config wm8996_retune[] = {
{
.name = "Sub LPF",
.rate = 48000,
@@ -635,12 +635,12 @@ static struct wm8915_retune_mobile_config wm8915_retune[] = {
},
};
-static struct wm8915_pdata wm8915_pdata __initdata = {
+static struct wm8996_pdata wm8996_pdata __initdata = {
.ldo_ena = S3C64XX_GPN(7),
.gpio_base = CODEC_GPIO_BASE,
.micdet_def = 1,
- .inl_mode = WM8915_DIFFERRENTIAL_1,
- .inr_mode = WM8915_DIFFERRENTIAL_1,
+ .inl_mode = WM8996_DIFFERRENTIAL_1,
+ .inr_mode = WM8996_DIFFERRENTIAL_1,
.irq_flags = IRQF_TRIGGER_RISING,
@@ -652,8 +652,8 @@ static struct wm8915_pdata wm8915_pdata __initdata = {
0x020e, /* GPIO5 == CLKOUT */
},
- .retune_mobile_cfgs = wm8915_retune,
- .num_retune_mobile_cfgs = ARRAY_SIZE(wm8915_retune),
+ .retune_mobile_cfgs = wm8996_retune,
+ .num_retune_mobile_cfgs = ARRAY_SIZE(wm8996_retune),
};
static struct wm8962_pdata wm8962_pdata __initdata = {
@@ -679,8 +679,8 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
.platform_data = &glenfarclas_pmic_pdata },
{ I2C_BOARD_INFO("wm1250-ev1", 0x27) },
- { I2C_BOARD_INFO("wm8915", 0x1a),
- .platform_data = &wm8915_pdata,
+ { I2C_BOARD_INFO("wm8996", 0x1a),
+ .platform_data = &wm8996_pdata,
.irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
},
{ I2C_BOARD_INFO("wm9081", 0x6c),
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index be7c638b648b..cfbcf8b95599 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -22,6 +22,7 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <asm/system.h>
#include <asm/unaligned.h>
#include "fault.h"
@@ -95,6 +96,33 @@ static const char *usermode_action[] = {
"signal+warn"
};
+/* Return true if and only if the ARMv6 unaligned access model is in use. */
+static bool cpu_is_v6_unaligned(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
+}
+
+static int safe_usermode(int new_usermode, bool warn)
+{
+ /*
+ * ARMv6 and later CPUs can perform unaligned accesses for
+ * most single load and store instructions up to word size.
+ * LDM, STM, LDRD and STRD still need to be handled.
+ *
+ * Ignoring the alignment fault is not an option on these
+ * CPUs since we spin re-faulting the instruction without
+ * making any progress.
+ */
+ if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
+ new_usermode |= UM_FIXUP;
+
+ if (warn)
+ printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+ }
+
+ return new_usermode;
+}
+
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
if (get_user(mode, buffer))
return -EFAULT;
if (mode >= '0' && mode <= '5')
- ai_usermode = mode - '0';
+ ai_usermode = safe_usermode(mode - '0', true);
}
return count;
}
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (ai_usermode & UM_FIXUP)
goto fixup;
- if (ai_usermode & UM_SIGNAL)
- force_sig(SIGBUS, current);
- else {
+ if (ai_usermode & UM_SIGNAL) {
+ siginfo_t si;
+
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+ si.si_addr = (void __user *)addr;
+
+ force_sig_info(si.si_signo, &si, current);
+ } else {
/*
* We're about to disable the alignment trap and return to
* user space. But if an interrupt occurs before actually
@@ -926,20 +961,11 @@ static int __init alignment_init(void)
return -ENOMEM;
#endif
- /*
- * ARMv6 and later CPUs can perform unaligned accesses for
- * most single load and store instructions up to word size.
- * LDM, STM, LDRD and STRD still need to be handled.
- *
- * Ignoring the alignment fault is not an option on these
- * CPUs since we spin re-faulting the instruction without
- * making any progress.
- */
- if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+ if (cpu_is_v6_unaligned()) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
- ai_usermode = UM_FIXUP;
+ ai_usermode = safe_usermode(ai_usermode, false);
}
hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
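
The knob this file exposes is /proc/cpu/alignment: a single digit '0'..'5' is parsed by alignment_proc_write() above and becomes the UM_* bitmask. A minimal userspace sketch of driving it (not part of the patch; the value 2 meaning UM_FIXUP is taken from alignment.c's definitions):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/cpu/alignment", "w");

	if (!f) {
		perror("/proc/cpu/alignment");
		return 1;
	}
	fputc('2', f);	/* UM_FIXUP: kernel fixes up unaligned user accesses */
	fclose(f);
	return 0;
}

With safe_usermode() in place, writing '0' (ignore) on an ARMv6+ CPU no longer sticks: it is upgraded to fixup mode and the warning above is printed.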
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2fee782077c1..91bca355cd31 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
static inline void poison_init_mem(void *s, size_t count)
{
u32 *p = (u32 *)s;
- while ((count = count - 4))
+ for (; count != 0; count -= 4)
*p++ = 0xe7fddef0;
}
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f8f7ea34bfc5..683af3a182b7 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -410,6 +410,7 @@ __arm946_proc_info:
.long 0x41009460
.long 0xff00fff0
.long 0
+ .long 0
b __arm946_setup
.long cpu_arch_name
.long cpu_elf_name
@@ -418,6 +419,6 @@ __arm946_proc_info:
.long arm946_processor_functions
.long 0
.long 0
- .long arm940_cache_fns
+ .long arm946_cache_fns
.size __arm946_proc_info, . - __arm946_proc_info
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 1f780b95c0f0..938986e412f1 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -22,7 +22,6 @@ static __always_inline bool arch_static_branch(struct jump_label_key *key)
asm goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- ".align 4\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".popsection \n\t"
: : "i" (key) : : l_yes);
@@ -41,7 +40,6 @@ struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
- jump_label_t pad;
};
#endif /* _ASM_POWERPC_JUMP_LABEL_H */
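
The two hunks above belong together: with the pad word dropped, struct jump_entry is exactly the three JUMP_ENTRY_TYPE words the inline asm pushes into __jump_table, so the extra ".align 4" is no longer needed to keep the C and asm views in step. A compile-time sketch of that invariant (illustrative; jump_label_t as a 64-bit word is an assumption for a ppc64 build):

#include <assert.h>
#include <stdint.h>

typedef uint64_t jump_label_t;	/* assumption: 64-bit kernel */

struct jump_entry {
	jump_label_t code;
	jump_label_t target;
	jump_label_t key;
};

static_assert(sizeof(struct jump_entry) == 3 * sizeof(jump_label_t),
	      "C view must match the record the inline asm emits");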
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index 6857af58b02e..bffd062adf79 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -3,17 +3,7 @@
#include <asm/page.h>
-/*
- * If CONFIG_RELOCATABLE is enabled we can place the kdump kernel anywhere.
- * To keep enough space in the RMO for the first stage kernel on 64bit, we
- * place it at 64MB. If CONFIG_RELOCATABLE is not enabled we must place
- * the second stage at 32MB.
- */
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC64)
-#define KDUMP_KERNELBASE 0x4000000
-#else
#define KDUMP_KERNELBASE 0x2000000
-#endif
/* How many bytes to reserve at zero for kdump. The reserve limit should
* be greater or equal to the trampoline's end address.
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e8aaf6fce38b..559da199edb5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1003,7 +1003,6 @@
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
-#define PV_POWER7 0x003F
#define PV_970FX 0x003C
#define PV_POWER6 0x003E
#define PV_POWER7 0x003F
@@ -1024,13 +1023,16 @@
#define mtmsrd(v) __mtmsrd((v), 0)
#define mtmsr(v) mtmsrd(v)
#else
-#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory")
+#define mtmsr(v) asm volatile("mtmsr %0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
#endif
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
-#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\
+#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
: "memory")
#ifdef __powerpc64__
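
Why the casts: as macros, mtmsr()/mtspr() bind whatever type the caller passes straight to the "r" constraint, unlike a function whose prototype would convert the argument. On a 32-bit build a u64 argument occupies a register pair and "%0" expands to only the first register of the pair, so the wrong half of the value can reach the SPR. A sketch of the hazard (assumes a 32-bit kernel; SPRN_SPRG3 is just a convenient SPR for illustration):

	u64 mask = 0x100000001ULL;

	/* pre-patch: which half is moved depends on register allocation;
	 * post-patch: the (unsigned long) cast makes it the low 32 bits. */
	mtspr(SPRN_SPRG3, mask);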
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9fb933248ab6..fa44ff538861 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2051,7 +2051,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
static struct cpu_spec the_cpu_spec;
-static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
+static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
+ struct cpu_spec *s)
{
struct cpu_spec *t = &the_cpu_spec;
struct cpu_spec old;
@@ -2114,6 +2115,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
t->cpu_setup(offset, t);
}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
+
+ return t;
}
struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
@@ -2124,10 +2127,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
s = PTRRELOC(s);
for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
- if ((pvr & s->pvr_mask) == s->pvr_value) {
- setup_cpu_spec(offset, s);
- return s;
- }
+ if ((pvr & s->pvr_mask) == s->pvr_value)
+ return setup_cpu_spec(offset, s);
}
BUG();
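
The subtlety here: identify_cpu() used to hand back the matched cpu_specs[] template, while setup_cpu_spec() installs and keeps fixing up the_cpu_spec, the canonical copy. Returning t instead of s means callers hold a pointer to the live copy. A userspace model of the aliasing difference (illustrative only; fields invented for the sketch):

#include <assert.h>
#include <string.h>

struct cpu_spec { unsigned int pvr_value; unsigned int num_pmcs; };

static struct cpu_spec the_cpu_spec;

static struct cpu_spec *setup_cpu_spec(const struct cpu_spec *s)
{
	struct cpu_spec *t = &the_cpu_spec;

	memcpy(t, s, sizeof(*t));	/* install the matched template */
	return t;			/* hand back the canonical copy */
}

int main(void)
{
	struct cpu_spec tmpl = { .pvr_value = 0x003f, .num_pmcs = 6 };
	struct cpu_spec *cur = setup_cpu_spec(&tmpl);

	the_cpu_spec.num_pmcs = 4;	/* later fixup of the live copy */
	assert(cur->num_pmcs == 4);	/* visible through the returned pointer */
	return 0;
}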
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 1577434f4088..b25f6325fc70 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -117,6 +117,7 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
+#ifdef CONFIG_PCI
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
resource_size_t start = pci_resource_start(dev, bar);
@@ -146,3 +147,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
+#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 6658a1589955..9ce1672afb59 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -136,12 +136,16 @@ void __init reserve_crashkernel(void)
crashk_res.start = KDUMP_KERNELBASE;
#else
if (!crashk_res.start) {
+#ifdef CONFIG_PPC64
/*
- * unspecified address, choose a region of specified size
- * can overlap with initrd (ignoring corruption when retained)
- * ppc64 requires kernel and some stacks to be in first segemnt
+ * On 64bit we split the RMO in half but cap it at half of
+ * a small SLB (128MB) since the crash kernel needs to place
+ * itself and some stacks to be in the first segment.
*/
+ crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
+#else
crashk_res.start = KDUMP_KERNELBASE;
+#endif
}
crash_base = PAGE_ALIGN(crashk_res.start);
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index d05ae4204bbf..564c1d8bdb5c 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -154,8 +154,12 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
((unsigned long)ptr & 7))
return -EFAULT;
- if (!__get_user_inatomic(*ret, ptr))
+ pagefault_disable();
+ if (!__get_user_inatomic(*ret, ptr)) {
+ pagefault_enable();
return 0;
+ }
+ pagefault_enable();
return read_user_stack_slow(ptr, ret, 8);
}
@@ -166,8 +170,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
((unsigned long)ptr & 3))
return -EFAULT;
- if (!__get_user_inatomic(*ret, ptr))
+ pagefault_disable();
+ if (!__get_user_inatomic(*ret, ptr)) {
+ pagefault_enable();
return 0;
+ }
+ pagefault_enable();
return read_user_stack_slow(ptr, ret, 4);
}
@@ -294,11 +302,17 @@ static inline int current_is_64bit(void)
*/
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
+ int rc;
+
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
((unsigned long)ptr & 3))
return -EFAULT;
- return __get_user_inatomic(*ret, ptr);
+ pagefault_disable();
+ rc = __get_user_inatomic(*ret, ptr);
+ pagefault_enable();
+
+ return rc;
}
static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
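
All three hunks apply one pattern: bracket powerpc's __get_user_inatomic() with pagefault_disable()/pagefault_enable() so a faulting read of the user stack fails fast instead of taking the full page-fault path from perf's interrupt context. Condensed to its shape (kernel-context sketch, illustrative):

#include <linux/uaccess.h>

static int read_user_word(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	pagefault_disable();	/* a user fault now just returns nonzero */
	rc = __get_user_inatomic(*ret, ptr);
	pagefault_enable();

	return rc;		/* 0 on success */
}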
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index c016033ba78d..a909f4e9343b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1020,7 +1020,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
}
if (addr == 0)
return 0;
- RELOC(alloc_bottom) = addr;
+ RELOC(alloc_bottom) = addr + size;
prom_debug(" -> %x\n", addr);
prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
@@ -1830,11 +1830,13 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
if (room > DEVTREE_CHUNK_SIZE)
room = DEVTREE_CHUNK_SIZE;
if (room < PAGE_SIZE)
- prom_panic("No memory for flatten_device_tree (no room)");
+ prom_panic("No memory for flatten_device_tree "
+ "(no room)\n");
chunk = alloc_up(room, 0);
if (chunk == 0)
- prom_panic("No memory for flatten_device_tree (claim failed)");
- *mem_end = RELOC(alloc_top);
+ prom_panic("No memory for flatten_device_tree "
+ "(claim failed)\n");
+ *mem_end = chunk + room;
}
ret = (void *)*mem_start;
@@ -2042,7 +2044,7 @@ static void __init flatten_device_tree(void)
/*
* Check how much room we have between alloc top & bottom (+/- a
- * few pages), crop to 4Mb, as this is our "chuck" size
+ * few pages), crop to 1MB, as this is our "chunk" size
*/
room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
if (room > DEVTREE_CHUNK_SIZE)
@@ -2053,7 +2055,7 @@ static void __init flatten_device_tree(void)
mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
if (mem_start == 0)
prom_panic("Can't allocate initial device-tree chunk\n");
- mem_end = RELOC(alloc_top);
+ mem_end = mem_start + room;
/* Get root of tree */
root = call_prom("peer", 1, 1, (phandle)0);
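
The alloc_up() hunk restores the bump-allocator invariant: alloc_bottom must advance past the block just handed out, or consecutive callers receive overlapping memory; the flatten_device_tree() hunks fix the matching bookkeeping by setting mem_end to the end of the chunk actually claimed rather than alloc_top. A userspace model of the allocator fix (simplified; not the prom_init code):

#include <assert.h>

static unsigned long alloc_bottom = 0x100000;

static unsigned long alloc_up(unsigned long size, unsigned long align)
{
	unsigned long addr = align ? (alloc_bottom + align - 1) & ~(align - 1)
				   : alloc_bottom;

	alloc_bottom = addr + size;	/* the fix: was "= addr" */
	return addr;
}

int main(void)
{
	unsigned long a = alloc_up(0x1000, 0x1000);
	unsigned long b = alloc_up(0x1000, 0x1000);

	assert(b >= a + 0x1000);	/* allocations no longer overlap */
	return 0;
}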
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 6dd33581a228..de2950135e6e 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1251,7 +1251,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r6,reg*16+VCPU_VSRS
- stxvd2x reg,r6,r3
+ STXVD2X(reg,r6,r3)
reg = reg + 1
.endr
FTR_SECTION_ELSE
@@ -1313,7 +1313,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r7,reg*16+VCPU_VSRS
- lxvd2x reg,r7,r4
+ LXVD2X(reg,r7,r4)
reg = reg + 1
.endr
FTR_SECTION_ELSE
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d0af7fb2f344..b9ba86191aed 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -24,7 +24,7 @@ source "arch/powerpc/platforms/wsp/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
- default y
+ default n
---help---
This option enables various optimizations for running under the KVM
hypervisor. Overhead for the kernel when not running inside KVM should
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index e9190073bb97..0e8656370063 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -181,7 +181,7 @@ static void dtl_stop(struct dtl *dtl)
lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
- unregister_dtl(hwcpu, __pa(dtl->buf));
+ unregister_dtl(hwcpu);
}
static u64 dtl_current_index(struct dtl *dtl)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index bc0288501f17..83a3ca2fd282 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -135,7 +135,7 @@ static void pseries_mach_cpu_die(void)
get_lppaca()->idle = 0;
if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
- unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
+ unregister_slb_shadow(hwcpu);
/*
* Call to start_secondary_resume() will not return.
@@ -150,7 +150,7 @@ static void pseries_mach_cpu_die(void)
WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
- unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
+ unregister_slb_shadow(hwcpu);
rtas_stop_self();
/* Should never get here... */
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
index c829e6067d54..2c4dd1fb8333 100644
--- a/arch/powerpc/platforms/pseries/io_event_irq.c
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -212,17 +212,15 @@ static int __init ioei_init(void)
struct device_node *np;
ioei_check_exception_token = rtas_token("check-exception");
- if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) {
- pr_warning("IO Event IRQ not supported on this system !\n");
+ if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE)
return -ENODEV;
- }
+
np = of_find_node_by_path("/event-sources/ibm,io-events");
if (np) {
request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
+ pr_info("IBM I/O event interrupts enabled\n");
of_node_put(np);
} else {
- pr_err("io_event_irq: No ibm,io-events on system! "
- "IO Event interrupt disabled.\n");
return -ENODEV;
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 54cf3a4aa16b..7d94bdc63d50 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -25,20 +25,30 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
- unsigned long addr;
+ int ret;
+ int cpu = smp_processor_id();
+ int hwcpu = hard_smp_processor_id();
- addr = __pa(get_slb_shadow());
- if (unregister_slb_shadow(hard_smp_processor_id(), addr))
- printk("SLB shadow buffer deregistration of "
- "cpu %u (hw_cpu_id %d) failed\n",
- smp_processor_id(),
- hard_smp_processor_id());
+ if (get_lppaca()->dtl_enable_mask) {
+ ret = unregister_dtl(hwcpu);
+ if (ret) {
+ pr_err("WARNING: DTL deregistration for cpu "
+ "%d (hw %d) failed with %d\n",
+ cpu, hwcpu, ret);
+ }
+ }
+
+ ret = unregister_slb_shadow(hwcpu);
+ if (ret) {
+ pr_err("WARNING: SLB shadow buffer deregistration "
+ "for cpu %d (hw %d) failed with %d\n",
+ cpu, hwcpu, ret);
+ }
- addr = __pa(get_lppaca());
- if (unregister_vpa(hard_smp_processor_id(), addr)) {
- printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
- "failed\n", smp_processor_id(),
- hard_smp_processor_id());
+ ret = unregister_vpa(hwcpu);
+ if (ret) {
+ pr_err("WARNING: VPA deregistration for cpu %d "
+ "(hw %d) failed with %d\n", cpu, hwcpu, ret);
}
}
}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f7205d344efd..c9a29dae8c05 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -67,9 +67,8 @@ void vpa_init(int cpu)
ret = register_vpa(hwcpu, addr);
if (ret) {
- printk(KERN_ERR "WARNING: vpa_init: VPA registration for "
- "cpu %d (hw %d) of area %lx returns %ld\n",
- cpu, hwcpu, addr, ret);
+ pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
+ "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
return;
}
/*
@@ -80,10 +79,9 @@ void vpa_init(int cpu)
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
ret = register_slb_shadow(hwcpu, addr);
if (ret)
- printk(KERN_ERR
- "WARNING: vpa_init: SLB shadow buffer "
- "registration for cpu %d (hw %d) of area %lx "
- "returns %ld\n", cpu, hwcpu, addr, ret);
+ pr_err("WARNING: SLB shadow buffer registration for "
+ "cpu %d (hw %d) of area %lx failed with %ld\n",
+ cpu, hwcpu, addr, ret);
}
/*
@@ -100,8 +98,9 @@ void vpa_init(int cpu)
dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
ret = register_dtl(hwcpu, __pa(dtl));
if (ret)
- pr_warn("DTL registration failed for cpu %d (%ld)\n",
- cpu, ret);
+ pr_err("WARNING: DTL registration of cpu %d (hw %d) "
+ "failed with %ld\n", smp_processor_id(),
+ hwcpu, ret);
lppaca_of(cpu).dtl_enable_mask = 2;
}
}
@@ -204,7 +203,7 @@ static void pSeries_lpar_hptab_clear(void)
unsigned long ptel;
} ptes[4];
long lpar_rc;
- int i, j;
+ unsigned long i, j;
/* Read in batches of 4,
* invalidate only valid entries not in the VRMA
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index 4bf21207d7d3..41c24c146d6a 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -53,9 +53,9 @@ static inline long vpa_call(unsigned long flags, unsigned long cpu,
return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}
-static inline long unregister_vpa(unsigned long cpu, unsigned long vpa)
+static inline long unregister_vpa(unsigned long cpu)
{
- return vpa_call(0x5, cpu, vpa);
+ return vpa_call(0x5, cpu, 0);
}
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
@@ -63,9 +63,9 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa)
return vpa_call(0x1, cpu, vpa);
}
-static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa)
+static inline long unregister_slb_shadow(unsigned long cpu)
{
- return vpa_call(0x7, cpu, vpa);
+ return vpa_call(0x7, cpu, 0);
}
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
@@ -73,9 +73,9 @@ static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
return vpa_call(0x3, cpu, vpa);
}
-static inline long unregister_dtl(unsigned long cpu, unsigned long vpa)
+static inline long unregister_dtl(unsigned long cpu)
{
- return vpa_call(0x6, cpu, vpa);
+ return vpa_call(0x6, cpu, 0);
}
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index d00e52926b71..0969fd98c4fa 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -324,8 +324,9 @@ static int alloc_dispatch_logs(void)
dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
if (ret)
- pr_warn("DTL registration failed for boot cpu %d (%d)\n",
- smp_processor_id(), ret);
+ pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
+ "with %d\n", smp_processor_id(),
+ hard_smp_processor_id(), ret);
get_paca()->lppaca_ptr->dtl_enable_mask = 2;
return 0;
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index a59ba96d2c21..dbfe96bc878a 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -655,8 +655,6 @@ struct ppc4xx_pciex_hwops
static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
-#ifdef CONFIG_44x
-
static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
unsigned int sdr_offset,
unsigned int mask,
@@ -688,6 +686,7 @@ static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
return 0;
}
+
static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
{
printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
@@ -718,6 +717,8 @@ static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
}
+#ifdef CONFIG_44x
+
/* Check various reset bits of the 440SPe PCIe core */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 45fbb8f7f549..f88af2c2a561 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1590,6 +1590,7 @@ static __init int intel_pmu_init(void)
break;
case 42: /* SandyBridge */
+ case 45: /* SandyBridge, "Romely-EP" */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
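
For completeness, a userspace sketch (illustrative; relies on GCC's <cpuid.h>) that checks whether the running part is the family 6, model 45 CPU the new case label covers:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	family = (eax >> 8) & 0xf;
	model  = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);	/* ext. model */

	printf("family %u model %u%s\n", family, model,
	       (family == 6 && model == 45) ? " (Sandy Bridge-EP)" : "");
	return 0;
}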