From 2d6f0c3ae609a73ce6753395daaab3204cd33918 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Nov 2013 14:55:22 +1100
Subject: powerpc: Fix build break with PPC_EARLY_DEBUG_BOOTX=y

A kernel configured with PPC_EARLY_DEBUG_BOOTX=y but PPC_PMAC=n and
PPC_MAPLE=n will fail to link:

  btext.c:(.text+0x2d0fc): undefined reference to `.rmci_off'
  btext.c:(.text+0x2d214): undefined reference to `.rmci_on'

Fix it by making the build of rmci_on/off() depend on
PPC_EARLY_DEBUG_BOOTX, which also enables the only code that uses them.

Signed-off-by: Michael Ellerman
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/misc_64.S | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e59caf874d05..64bf8db12b15 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2)
 	or	r3,r7,r9
 	blr
 
-#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
 
 _GLOBAL(rmci_on)
 	sync
 	isync
@@ -277,6 +277,9 @@ _GLOBAL(rmci_off)
 	isync
 	sync
 	blr
+#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 
 /*
  * Do an IO access in real mode
--
cgit v1.2.3

From e641eb03ab2b0f065fa5e64b4202fb5b0441b427 Mon Sep 17 00:00:00 2001
From: Mahesh Salgaonkar
Date: Mon, 9 Dec 2013 15:33:39 +0530
Subject: powerpc: Fix up the kdump base cap to 128M

The current logic sets the kdump base to the minimum of 2G and
ppc64_rma_size/2. On a PowerNV kernel the first memory block 'memory@0'
can be very large, equal to the DIMM size, while the ppc64_rma_size value
is capped at 1G. Hence on PowerNV the kdump base is set to 512M, and kdump
fails while allocating the paca array, because the pacas need their memory
from the RMA region capped at 256M (see allocate_pacas()).

This patch lowers the kdump base cap to 128M so that the kdump kernel can
successfully get memory below 256M for paca allocation.

Signed-off-by: Mahesh Salgaonkar
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/machine_kexec.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 88a7fb458dfd..75d4f7340da8 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -148,7 +148,7 @@ void __init reserve_crashkernel(void)
 	 * a small SLB (128MB) since the crash kernel needs to place
 	 * itself and some stacks to be in the first segment.
 	 */
-	crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
+	crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
 #else
 	crashk_res.start = KDUMP_KERNELBASE;
 #endif
--
cgit v1.2.3
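[ A quick standalone sketch of the arithmetic behind the cap change above;
  plain C, not kernel code. The 1G RMA cap and the 256M paca limit are the
  figures quoted in the commit message. ]

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long long rma_size = 0x40000000ULL;   /* 1G RMA cap on PowerNV */
	unsigned long long paca_limit = 0x10000000ULL; /* pacas need memory below 256M */

	/* old cap: min(2G, 512M) = 512M, which is above the 256M paca limit */
	unsigned long long old_base = MIN(0x80000000ULL, rma_size / 2);
	/* new cap: min(128M, 512M) = 128M, leaving room below 256M */
	unsigned long long new_base = MIN(0x8000000ULL, rma_size / 2);

	printf("old base: %lluM (%s)\n", old_base >> 20,
	       old_base < paca_limit ? "ok" : "too high for paca allocation");
	printf("new base: %lluM (%s)\n", new_base >> 20,
	       new_base < paca_limit ? "ok" : "too high for paca allocation");
	return 0;
}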
From 36aa1b180e7398e2bd27642760bfaa4ad8c65ab6 Mon Sep 17 00:00:00 2001
From: Ulrich Weigand
Date: Thu, 12 Dec 2013 15:59:34 +1100
Subject: powerpc: PTRACE_PEEKUSR always returns FPR0

There is a bug in using ptrace to access FPRs via PTRACE_PEEKUSR /
PTRACE_POKEUSR. In effect, trying to access any of the FPRs always
really accesses FPR0, which does seriously break debugging :-)

The problem seems to have been introduced by commit 3ad26e5c4459d
(Merge branch 'for-kvm' into next).

[ It is indeed a merge conflict between Paul's FPU/VSX state rework
  and my LE patches - Anton ]

Signed-off-by: Ulrich Weigand
Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/ptrace.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75fb40498b41..2e3d2bf536c5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&tmp, &child->thread.fp_state.fpr,
+				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
 				       sizeof(long));
 			else
 				tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&child->thread.fp_state.fpr, &data,
+				memcpy(&child->thread.TS_FPR(fpidx), &data,
 				       sizeof(long));
 			else
 				child->thread.fp_state.fpscr = data;
--
cgit v1.2.3
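[ Why the unindexed form always hits FPR0: &child->thread.fp_state.fpr is
  the address of the array itself, i.e. of element 0, no matter what fpidx
  holds. Below is a simplified standalone sketch using a plain array; the
  real fp_state layout and the TS_FPR() macro are more involved than this. ]

#include <stdio.h>
#include <string.h>

struct fake_thread {
	unsigned long fpr[32];	/* stand-in for the real FP register save area */
};

int main(void)
{
	struct fake_thread thread;
	unsigned long tmp;
	int fpidx = 5;

	for (int i = 0; i < 32; i++)
		thread.fpr[i] = 0x1000 + i;

	/* buggy form: copies from &fpr, i.e. fpr[0], whatever fpidx is */
	memcpy(&tmp, &thread.fpr, sizeof(long));
	printf("buggy read of FPR%d: 0x%lx\n", fpidx, tmp);

	/* fixed form: index the array, as TS_FPR(fpidx) does in the kernel */
	memcpy(&tmp, &thread.fpr[fpidx], sizeof(long));
	printf("fixed read of FPR%d: 0x%lx\n", fpidx, tmp);
	return 0;
}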
From 01666c8ee2b6afcd31de2064fbb7c097a75e5089 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 12 Dec 2013 15:59:35 +1100
Subject: powerpc: Fix endian issue in setup-common.c

During an LE boot we see:

  Partition configured for 1073741824 cpus, operating system maximum is 2048.

Clearly missing a byteswap here.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/setup-common.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index febc80445d25..bc76cc6b419c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
 	    (dn = of_find_node_by_path("/rtas"))) {
 		int num_addr_cell, num_size_cell, maxcpus;
-		const unsigned int *ireg;
+		const __be32 *ireg;
 
 		num_addr_cell = of_n_addr_cells(dn);
 		num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
 		if (!ireg)
 			goto out;
 
-		maxcpus = ireg[num_addr_cell + num_size_cell];
+		maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
 
 		/* Double maxcpus for processors which have SMT capability */
 		if (cpu_has_feature(CPU_FTR_SMT))
--
cgit v1.2.3

From f8a1883a833bbad8e6b5ec4f0918b7797e652d65 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 12 Dec 2013 15:59:36 +1100
Subject: powerpc: Fix topology core_id endian issue on LE builds

cpu_to_core_id() is missing a byteswap:

  cat /sys/devices/system/cpu/cpu63/topology/core_id
  201326592

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/smp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a3b64f3bf9a2..c1cf4a1522d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 int cpu_to_core_id(int cpu)
 {
 	struct device_node *np;
-	const int *reg;
+	const __be32 *reg;
 	int id = -1;
 
 	np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
 	if (!reg)
 		goto out;
 
-	id = *reg;
+	id = be32_to_cpup(reg);
 out:
 	of_node_put(np);
 	return id;
--
cgit v1.2.3
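[ The two endian fixes above, and the crash dump fix that follows, share one
  pattern: device tree cells are big-endian 32-bit values, so a property must
  be read through a __be32 pointer and converted with be32_to_cpup() rather
  than dereferenced as a native int. The self-contained userspace sketch
  below, with stand-ins for the kernel's __be32 type and helper, reproduces
  the bogus core_id value from the report above. ]

#include <stdio.h>
#include <stdint.h>

typedef uint32_t __be32;	/* stand-in for the kernel's __be32 */

/* stand-in for the kernel's be32_to_cpup() helper */
static uint32_t be32_to_cpup(const __be32 *p)
{
	const uint8_t *b = (const uint8_t *)p;
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

int main(void)
{
	/* a property cell as it sits in the device tree: big-endian 12 */
	static const union { uint8_t b[4]; __be32 v; } prop = { { 0x00, 0x00, 0x00, 0x0c } };
	const __be32 *reg = &prop.v;

	printf("raw dereference: %u\n", *reg);               /* 201326592 on LE, 12 on BE */
	printf("be32_to_cpup():  %u\n", be32_to_cpup(reg));  /* 12 everywhere */
	return 0;
}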
From a29e30efa3a1b253fd1252731aa774a3544c5c3b Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 12 Dec 2013 15:59:41 +1100
Subject: powerpc: Fix endian issues in crash dump code

A couple more device tree properties that need byte swapping.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/crash_dump.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 779a78c26435..11c1d069d920 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
-	const u32 *basep, *sizep;
+	const __be32 *basep, *sizep;
 	unsigned int rtas_start = 0, rtas_end = 0;
 
 	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
 	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
 
 	if (basep && sizep) {
-		rtas_start = *basep;
-		rtas_end = *basep + *sizep;
+		rtas_start = be32_to_cpup(basep);
+		rtas_end = rtas_start + be32_to_cpup(sizep);
 	}
 
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
--
cgit v1.2.3

From 286e4f90a72c0b0621dde0294af6ed4b0baddabb Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Mon, 23 Dec 2013 12:19:51 +1100
Subject: powerpc: Align p_end

p_end is an 8 byte value embedded in the text section. This means it is
only 4 byte aligned when it should be 8 byte aligned. Fix this by adding
an explicit alignment.

This fixes an issue where POWER7 little endian builds with
CONFIG_RELOCATABLE=y fail to boot.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/head_64.S | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41aba4053..fad2abdcbdfe 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -470,6 +470,7 @@ _STATIC(__after_prom_start)
 	mtctr	r8
 	bctr
 
+.balign 8
 p_end:	.llong	_end - _stext
 
 4:	/* Now copy the rest of the kernel up to _end */
--
cgit v1.2.3

From 7d4151b5098fb0bf7f6f8d1156e1ab9d83260580 Mon Sep 17 00:00:00 2001
From: Olof Johansson
Date: Sat, 28 Dec 2013 13:01:47 -0800
Subject: powerpc: Fix alignment of secondary cpu spin vars

Commit 5c0484e25ec0 ('powerpc: Endian safe trampoline') resulted in
losing proper alignment of the spinlock variables used when booting
secondary CPUs, causing some quite odd boot failures on PA Semi-based
systems.

This showed itself on ppc64_defconfig, but not on pasemi_defconfig, so
it had gone unnoticed when I initially tested the LE patch set.

Fix is to add explicit alignment instead of relying on good luck. :)

[ It appears that there is a different issue with PA Semi systems,
  however this fix is definitely correct so applying anyway -- BenH ]

Fixes: 5c0484e25ec0 ('powerpc: Endian safe trampoline')
Reported-by: Christian Zigotzky
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=67811
Signed-off-by: Olof Johansson
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/head_64.S | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index fad2abdcbdfe..4f0946de2d5c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
  * of the function that the cpu should jump to to continue
  * initialization.
  */
+	.balign 8
 	.globl	__secondary_hold_spinloop
 __secondary_hold_spinloop:
 	.llong	0x0
--
cgit v1.2.3
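[ Both head_64.S fixes address the same underlying issue: an 8-byte .llong
  emitted into .text only inherits the section's 4-byte instruction alignment
  unless an explicit .balign 8 precedes it. Below is a minimal assembler
  sketch, not kernel code; the exact failure mode of a misaligned doubleword
  access depends on the CPU and on how the value is accessed, but natural
  alignment avoids the problem entirely. ]

	.text
	nop				# 4 bytes; the location counter is now only 4-byte aligned
unaligned_val:
	.llong	0x1122334455667788	# lands at offset 4: straddles a doubleword boundary

	.balign	8			# pad the location counter up to a multiple of 8
aligned_val:
	.llong	0x1122334455667788	# lands on an 8-byte boundary: safe for doubleword access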