Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--  arch/ppc/kernel/Makefile            |   26
-rw-r--r--  arch/ppc/kernel/align.c             |    4
-rw-r--r--  arch/ppc/kernel/asm-offsets.c       |    3
-rw-r--r--  arch/ppc/kernel/bitops.c            |  126
-rw-r--r--  arch/ppc/kernel/cpu_setup_6xx.S     |    6
-rw-r--r--  arch/ppc/kernel/cpu_setup_power4.S  |    6
-rw-r--r--  arch/ppc/kernel/cputable.c          | 1041
-rw-r--r--  arch/ppc/kernel/entry.S             |   12
-rw-r--r--  arch/ppc/kernel/fpu.S               |  133
-rw-r--r--  arch/ppc/kernel/head.S              |  100
-rw-r--r--  arch/ppc/kernel/head_44x.S          |   32
-rw-r--r--  arch/ppc/kernel/head_4xx.S          |   68
-rw-r--r--  arch/ppc/kernel/head_8xx.S          |   42
-rw-r--r--  arch/ppc/kernel/head_booke.h        |    4
-rw-r--r--  arch/ppc/kernel/head_fsl_booke.S    |   47
-rw-r--r--  arch/ppc/kernel/idle.c              |    3
-rw-r--r--  arch/ppc/kernel/irq.c               |    3
-rw-r--r--  arch/ppc/kernel/l2cr.S              |    2
-rw-r--r--  arch/ppc/kernel/machine_kexec.c     |    2
-rw-r--r--  arch/ppc/kernel/misc.S              |  235
-rw-r--r--  arch/ppc/kernel/pci.c               |   33
-rw-r--r--  arch/ppc/kernel/perfmon.c           |   96
-rw-r--r--  arch/ppc/kernel/perfmon_fsl_booke.c |    2
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c         |   39
-rw-r--r--  arch/ppc/kernel/process.c           |  142
-rw-r--r--  arch/ppc/kernel/ptrace.c            |  507
-rw-r--r--  arch/ppc/kernel/setup.c             |   39
-rw-r--r--  arch/ppc/kernel/signal.c            |  771
-rw-r--r--  arch/ppc/kernel/smp.c               |   22
-rw-r--r--  arch/ppc/kernel/syscalls.c          |  268
-rw-r--r--  arch/ppc/kernel/time.c              |   14
-rw-r--r--  arch/ppc/kernel/traps.c             |   50
-rw-r--r--  arch/ppc/kernel/vecemu.c            |  345
-rw-r--r--  arch/ppc/kernel/vector.S            |  217
-rw-r--r--  arch/ppc/kernel/vmlinux.lds.S       |   26
35 files changed, 449 insertions(+), 4017 deletions(-)
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index b1457a8a9c0f..c610ca933a25 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the linux kernel.
#
+ifneq ($(CONFIG_PPC_MERGE),y)
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_40x) := head_4xx.o
@@ -9,13 +10,12 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_POWER4) += idle_power4.o
-extra-$(CONFIG_PPC_FPU) += fpu.o
extra-y += vmlinux.lds
obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
- process.o signal.o ptrace.o align.o \
- semaphore.o syscalls.o setup.o \
- cputable.o ppc_htab.o perfmon.o
+ process.o align.o \
+ setup.o \
+ ppc_htab.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
obj-$(CONFIG_POWER4) += cpu_setup_power4.o
@@ -25,7 +25,6 @@ obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
obj-$(CONFIG_TAU) += temp.o
-obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
ifndef CONFIG_E200
obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
endif
@@ -35,3 +34,20 @@ ifndef CONFIG_MATH_EMULATION
obj-$(CONFIG_8xx) += softemu8xx.o
endif
+# These are here while we do the architecture merge
+
+else
+obj-y := irq.o idle.o \
+ align.o
+obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
+obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_KGDB) += ppc-stub.o
+obj-$(CONFIG_TAU) += temp.o
+ifndef CONFIG_E200
+obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
+endif
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+endif
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index ff81da9598d8..ab398c4b70b6 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -375,7 +375,7 @@ fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
- cvt_fd(&data.f, &data.d, &current->thread.fpscr);
+ cvt_fd(&data.f, &data.d, &current->thread);
preempt_enable();
#else
return 0;
@@ -385,7 +385,7 @@ fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
- cvt_df(&data.d, &data.f, &current->thread.fpscr);
+ cvt_df(&data.d, &data.f, &current->thread);
preempt_enable();
#else
return 0;
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index d9ad1d776d0e..968261d69572 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -130,10 +130,10 @@ main(void)
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
+ DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -141,6 +141,7 @@ main(void)
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
DEFINE(pbe_next, offsetof(struct pbe, next));
+ DEFINE(TASK_SIZE, TASK_SIZE);
DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
return 0;
}
diff --git a/arch/ppc/kernel/bitops.c b/arch/ppc/kernel/bitops.c
deleted file mode 100644
index 7f53d193968b..000000000000
--- a/arch/ppc/kernel/bitops.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 1996 Paul Mackerras.
- */
-
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-
-/*
- * If the bitops are not inlined in bitops.h, they are defined here.
- * -- paulus
- */
-#if !__INLINE_BITOPS
-void set_bit(int nr, volatile void * addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%3 \n\
- or %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc" );
-}
-
-void clear_bit(int nr, volatile void *addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%3 \n\
- andc %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-void change_bit(int nr, volatile void *addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%3 \n\
- xor %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-int test_and_set_bit(int nr, volatile void *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- or %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-
- return (old & mask) != 0;
-}
-
-int test_and_clear_bit(int nr, volatile void *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- andc %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-
- return (old & mask) != 0;
-}
-
-int test_and_change_bit(int nr, volatile void *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- xor %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-
- return (old & mask) != 0;
-}
-#endif /* !__INLINE_BITOPS */
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
index ba396438ede3..55ed7716636f 100644
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ b/arch/ppc/kernel/cpu_setup_6xx.S
@@ -17,8 +17,6 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
-_GLOBAL(__setup_cpu_601)
- blr
_GLOBAL(__setup_cpu_603)
b setup_common_caches
_GLOBAL(__setup_cpu_604)
@@ -292,10 +290,10 @@ _GLOBAL(__init_fpu_registers)
#define CS_SIZE 32
.data
- .balign L1_CACHE_LINE_SIZE
+ .balign L1_CACHE_BYTES
cpu_state_storage:
.space CS_SIZE
- .balign L1_CACHE_LINE_SIZE,0
+ .balign L1_CACHE_BYTES,0
.text
/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S
index 7e4fbb653724..d7bfd60e21fc 100644
--- a/arch/ppc/kernel/cpu_setup_power4.S
+++ b/arch/ppc/kernel/cpu_setup_power4.S
@@ -63,8 +63,6 @@ _GLOBAL(__970_cpu_preinit)
isync
blr
-_GLOBAL(__setup_cpu_power4)
- blr
_GLOBAL(__setup_cpu_ppc970)
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
@@ -88,10 +86,10 @@ _GLOBAL(__setup_cpu_ppc970)
#define CS_SIZE 32
.data
- .balign L1_CACHE_LINE_SIZE
+ .balign L1_CACHE_BYTES
cpu_state_storage:
.space CS_SIZE
- .balign L1_CACHE_LINE_SIZE,0
+ .balign L1_CACHE_BYTES,0
.text
/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/cputable.c b/arch/ppc/kernel/cputable.c
deleted file mode 100644
index 6b76cf58d9e0..000000000000
--- a/arch/ppc/kernel/cputable.c
+++ /dev/null
@@ -1,1041 +0,0 @@
-/*
- * arch/ppc/kernel/cputable.c
- *
- * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/init.h>
-#include <asm/cputable.h>
-
-struct cpu_spec* cur_cpu_spec[NR_CPUS];
-
-extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_745x(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_power4(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_ppc970(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
-
-#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
- !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
- !defined(CONFIG_BOOKE))
-
-/* This table only contains "desktop" CPUs, it need to be filled with embedded
- * ones as well...
- */
-#define COMMON_PPC (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
- PPC_FEATURE_HAS_MMU)
-
-/* We only set the altivec features if the kernel was compiled with altivec
- * support
- */
-#ifdef CONFIG_ALTIVEC
-#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
-#define PPC_FEATURE_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
-#else
-#define CPU_FTR_ALTIVEC_COMP 0
-#define PPC_FEATURE_ALTIVEC_COMP 0
-#endif
-
-/* We only set the spe features if the kernel was compiled with
- * spe support
- */
-#ifdef CONFIG_SPE
-#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE
-#else
-#define PPC_FEATURE_SPE_COMP 0
-#endif
-
-/* We need to mark all pages as being coherent if we're SMP or we
- * have a 74[45]x and an MPC107 host bridge.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
-#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
-#else
-#define CPU_FTR_COMMON 0
-#endif
-
-/* The powersave features NAP & DOZE seems to confuse BDI when
- debugging. So if a BDI is used, disable theses
- */
-#ifndef CONFIG_BDI_SWITCH
-#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
-#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP
-#else
-#define CPU_FTR_MAYBE_CAN_DOZE 0
-#define CPU_FTR_MAYBE_CAN_NAP 0
-#endif
-
-struct cpu_spec cpu_specs[] = {
-#if CLASSIC_PPC
- { /* 601 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00010000,
- .cpu_name = "601",
- .cpu_features = CPU_FTR_COMMON | CPU_FTR_601 |
- CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_601_INSTR |
- PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_601
- },
- { /* 603 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00030000,
- .cpu_name = "603",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
- },
- { /* 603e */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00060000,
- .cpu_name = "603e",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
- },
- { /* 603ev */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00070000,
- .cpu_name = "603ev",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
- },
- { /* 604 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00040000,
- .cpu_name = "604",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 2,
- .cpu_setup = __setup_cpu_604
- },
- { /* 604e */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x00090000,
- .cpu_name = "604e",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_604
- },
- { /* 604r */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00090000,
- .cpu_name = "604r",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_604
- },
- { /* 604ev */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x000a0000,
- .cpu_name = "604ev",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_604
- },
- { /* 740/750 (0x4202, don't support TAU ?) */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x00084202,
- .cpu_name = "740/750",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_HPTE_TABLE |
- CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
- },
- { /* 750CX (80100 and 8010x?) */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x00080100,
- .cpu_name = "750CX",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
- },
- { /* 750CX (82201 and 82202) */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x00082200,
- .cpu_name = "750CX",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
- },
- { /* 750CXe (82214) */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x00082210,
- .cpu_name = "750CXe",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
- },
- { /* 750CXe "Gekko" (83214) */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x00083214,
- .cpu_name = "750CXe",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
- },
- { /* 745/755 */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x00083000,
- .cpu_name = "745/755",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
- },
- { /* 750FX rev 1.x */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x70000100,
- .cpu_name = "750FX",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
- CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
- },
- { /* 750FX rev 2.0 must disable HID0[DPM] */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x70000200,
- .cpu_name = "750FX",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
- CPU_FTR_NO_DPM,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
- },
- { /* 750FX (All revs except 2.0) */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x70000000,
- .cpu_name = "750FX",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
- CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750fx
- },
- { /* 750GX */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x70020000,
- .cpu_name = "750GX",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
- CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_DUAL_PLL_750FX |
- CPU_FTR_HAS_HIGH_BATS,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750fx
- },
- { /* 740/750 (L2CR bit need fixup for 740) */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00080000,
- .cpu_name = "740/750",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
- },
- { /* 7400 rev 1.1 ? (no TAU) */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x000c1101,
- .cpu_name = "7400 (1.1)",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_7400
- },
- { /* 7400 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x000c0000,
- .cpu_name = "7400",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
- CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_7400
- },
- { /* 7410 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x800c0000,
- .cpu_name = "7410",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
- CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_7410
- },
- { /* 7450 2.0 - no doze/nap */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80000200,
- .cpu_name = "7450",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7450 2.1 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80000201,
- .cpu_name = "7450",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
- CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7450 2.3 and newer */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80000000,
- .cpu_name = "7450",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7455 rev 1.x */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x80010100,
- .cpu_name = "7455",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7455 rev 2.0 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80010200,
- .cpu_name = "7455",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
- CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7455 others */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80010000,
- .cpu_name = "7455",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
- CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7447/7457 Rev 1.0 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80020100,
- .cpu_name = "7447/7457",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
- CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7447/7457 Rev 1.1 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80020101,
- .cpu_name = "7447/7457",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
- CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7447/7457 Rev 1.2 and later */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80020000,
- .cpu_name = "7447/7457",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
- CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7447A */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80030000,
- .cpu_name = "7447A",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
- CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 7448 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80040000,
- .cpu_name = "7448",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
- CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
- CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
- },
- { /* 82xx (8240, 8245, 8260 are all 603e cores) */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00810000,
- .cpu_name = "82xx",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
- CPU_FTR_USE_TB,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
- },
- { /* All G2_LE (603e core, plus some) have the same pvr */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00820000,
- .cpu_name = "G2_LE",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
- },
- { /* e300 (a 603e core, plus some) on 83xx */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00830000,
- .cpu_name = "e300",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
- },
- { /* default match, we assume split I/D cache & TB (non-601)... */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "(generic PPC)",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_generic
- },
-#endif /* CLASSIC_PPC */
-#ifdef CONFIG_PPC64BRIDGE
- { /* Power3 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00400000,
- .cpu_name = "Power3 (630)",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3
- },
- { /* Power3+ */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00410000,
- .cpu_name = "Power3 (630+)",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3
- },
- { /* I-star */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00360000,
- .cpu_name = "I-star",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3
- },
- { /* S-star */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00370000,
- .cpu_name = "S-star",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .cpu_setup = __setup_cpu_power3
- },
-#endif /* CONFIG_PPC64BRIDGE */
-#ifdef CONFIG_POWER4
- { /* Power4 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00350000,
- .cpu_name = "Power4",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .cpu_setup = __setup_cpu_power4
- },
- { /* PPC970 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00390000,
- .cpu_name = "PPC970",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
- PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .cpu_setup = __setup_cpu_ppc970
- },
- { /* PPC970FX */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x003c0000,
- .cpu_name = "PPC970FX",
- .cpu_features = CPU_FTR_COMMON |
- CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_HPTE_TABLE |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
- .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
- PPC_FEATURE_ALTIVEC_COMP,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .cpu_setup = __setup_cpu_ppc970
- },
-#endif /* CONFIG_POWER4 */
-#ifdef CONFIG_8xx
- { /* 8xx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00500000,
- .cpu_name = "8xx",
- /* CPU_FTR_MAYBE_CAN_DOZE is possible,
- * if the 8xx code is there.... */
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- },
-#endif /* CONFIG_8xx */
-#ifdef CONFIG_40x
- { /* 403GC */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x00200200,
- .cpu_name = "403GC",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- },
- { /* 403GCX */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x00201400,
- .cpu_name = "403GCX",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- },
- { /* 403G ?? */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00200000,
- .cpu_name = "403G ??",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- },
- { /* 405GP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40110000,
- .cpu_name = "405GP",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* STB 03xxx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40130000,
- .cpu_name = "STB03xxx",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* STB 04xxx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41810000,
- .cpu_name = "STB04xxx",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* NP405L */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41610000,
- .cpu_name = "NP405L",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* NP4GS3 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40B10000,
- .cpu_name = "NP4GS3",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* NP405H */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41410000,
- .cpu_name = "NP405H",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 405GPr */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x50910000,
- .cpu_name = "405GPr",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* STBx25xx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x51510000,
- .cpu_name = "STBx25xx",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 405LP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41F10000,
- .cpu_name = "405LP",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* Xilinx Virtex-II Pro */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x20010000,
- .cpu_name = "Virtex-II Pro",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 405EP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x51210000,
- .cpu_name = "405EP",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
-
-#endif /* CONFIG_40x */
-#ifdef CONFIG_44x
- {
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x40000850,
- .cpu_name = "440EP Rev. A",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- {
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x400008d3,
- .cpu_name = "440EP Rev. B",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 440GP Rev. B */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x40000440,
- .cpu_name = "440GP Rev. B",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 440GP Rev. C */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x40000481,
- .cpu_name = "440GP Rev. C",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 440GX Rev. A */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000850,
- .cpu_name = "440GX Rev. A",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 440GX Rev. B */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000851,
- .cpu_name = "440GX Rev. B",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 440GX Rev. C */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000892,
- .cpu_name = "440GX Rev. C",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 440GX Rev. F */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000894,
- .cpu_name = "440GX Rev. F",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
- { /* 440SP Rev. A */
- .pvr_mask = 0xff000fff,
- .pvr_value = 0x53000891,
- .cpu_name = "440SP Rev. A",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- },
-#endif /* CONFIG_44x */
-#ifdef CONFIG_FSL_BOOKE
- { /* e200z5 */
- .pvr_mask = 0xfff00000,
- .pvr_value = 0x81000000,
- .cpu_name = "e200z5",
- /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
- .cpu_features = CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
- PPC_FEATURE_UNIFIED_CACHE,
- .dcache_bsize = 32,
- },
- { /* e200z6 */
- .pvr_mask = 0xfff00000,
- .pvr_value = 0x81100000,
- .cpu_name = "e200z6",
- /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
- .cpu_features = CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE |
- PPC_FEATURE_UNIFIED_CACHE,
- .dcache_bsize = 32,
- },
- { /* e500 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80200000,
- .cpu_name = "e500",
- /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- },
- { /* e500v2 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80210000,
- .cpu_name = "e500v2",
- /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB | CPU_FTR_BIG_PHYS,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- },
-#endif
-#if !CLASSIC_PPC
- { /* default match */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "(generic PPC)",
- .cpu_features = CPU_FTR_COMMON,
- .cpu_user_features = PPC_FEATURE_32,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- }
-#endif /* !CLASSIC_PPC */
-};
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 03d4886869f3..f044edbb454f 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -200,9 +200,8 @@ _GLOBAL(DoSyscall)
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
rlwinm r10,r1,0,0,18 /* current_thread_info() */
- lwz r11,TI_LOCAL_FLAGS(r10)
- rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR
- stw r11,TI_LOCAL_FLAGS(r10)
+ li r11,0
+ stb r11,TI_SC_NOERR(r10)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -227,8 +226,8 @@ ret_from_syscall:
cmplw 0,r3,r11
rlwinm r12,r1,0,0,18 /* current_thread_info() */
blt+ 30f
- lwz r11,TI_LOCAL_FLAGS(r12)
- andi. r11,r11,_TIFL_FORCE_NOERROR
+ lbz r11,TI_SC_NOERR(r12)
+ cmpwi r11,0
bne 30f
neg r3,r3
lwz r10,_CCR(r1) /* Set SO bit in CR */
@@ -633,7 +632,8 @@ sigreturn_exit:
rlwinm r12,r1,0,0,18 /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
andi. r0,r9,_TIF_SYSCALL_T_OR_A
- bnel- do_syscall_trace_leave
+ beq+ ret_from_except_full
+ bl do_syscall_trace_leave
/* fall through */
.globl ret_from_except_full
diff --git a/arch/ppc/kernel/fpu.S b/arch/ppc/kernel/fpu.S
deleted file mode 100644
index 665d7d34304c..000000000000
--- a/arch/ppc/kernel/fpu.S
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * FPU support code, moved here from head.S so that it can be used
- * by chips which use other head-whatever.S files.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/cputable.h>
-#include <asm/cache.h>
-#include <asm/thread_info.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-
-/*
- * This task wants to use the FPU now.
- * On UP, disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Load up this task's FP registers from its thread_struct,
- * enable the FPU for the current task and return to the task.
- */
- .globl load_up_fpu
-load_up_fpu:
- mfmsr r5
- ori r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
- clrldi r5,r5,1 /* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
- SYNC
- MTMSRD(r5) /* enable use of fpu now */
- isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
- tophys(r6,0) /* get __pa constant */
- addis r3,r6,last_task_used_math@ha
- lwz r4,last_task_used_math@l(r3)
- cmpwi 0,r4,0
- beq 1f
- add r4,r4,r6
- addi r4,r4,THREAD /* want last_task_used_math->thread */
- SAVE_32FPRS(0, r4)
- mffs fr0
- stfd fr0,THREAD_FPSCR-4(r4)
- lwz r5,PT_REGS(r4)
- add r5,r5,r6
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r10,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r10 /* disable FP for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* enable use of FP after return */
- mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
- lwz r4,THREAD_FPEXC_MODE(r5)
- ori r9,r9,MSR_FP /* enable FP for current */
- or r9,r9,r4
- lfd fr0,THREAD_FPSCR-4(r5)
- mtfsf 0xff,fr0
- REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
- subi r4,r5,THREAD
- sub r4,r4,r6
- stw r4,last_task_used_math@l(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- /* we haven't used ctr or xer or lr */
- b fast_exception_return
-
-/*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
- .globl KernelFP
-KernelFP:
- lwz r3,_MSR(r1)
- ori r3,r3,MSR_FP
- stw r3,_MSR(r1) /* enable use of FP after return */
- lis r3,86f@h
- ori r3,r3,86f@l
- mr r4,r2 /* current */
- lwz r5,_NIP(r1)
- bl printk
- b ret_from_except
-86: .string "floating point used in kernel (task=%p, pc=%x)\n"
- .align 4,0
-
-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
- .globl giveup_fpu
-giveup_fpu:
- mfmsr r5
- ori r5,r5,MSR_FP
- SYNC_601
- ISYNC_601
- MTMSRD(r5) /* enable use of fpu now */
- SYNC_601
- isync
- cmpwi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- lwz r5,PT_REGS(r3)
- cmpwi 0,r5,0
- SAVE_32FPRS(0, r3)
- mffs fr0
- stfd fr0,THREAD_FPSCR-4(r3)
- beq 1f
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r3,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r3 /* disable FP for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- lis r4,last_task_used_math@ha
- stw r5,last_task_used_math@l(r4)
-#endif /* CONFIG_SMP */
- blr
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1960fb8c259c..c5a890dca9cf 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -349,12 +349,12 @@ i##n: \
/* System reset */
/* core99 pmac starts the seconary here by changing the vector, and
- putting it back to what it was (UnknownException) when done. */
+ putting it back to what it was (unknown_exception) when done. */
#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
. = 0x100
b __secondary_start_gemini
#else
- EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
+ EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
#endif
/* Machine check */
@@ -389,7 +389,7 @@ i##n: \
cmpwi cr1,r4,0
bne cr1,1f
#endif
- EXC_XFER_STD(0x200, MachineCheckException)
+ EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1: b machine_check_in_rtas
#endif
@@ -456,10 +456,10 @@ Alignment:
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0x600, AlignmentException)
+ EXC_XFER_EE(0x600, alignment_exception)
/* Program check exception */
- EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
+ EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
/* Floating-point unavailable */
. = 0x800
@@ -467,13 +467,13 @@ FPUnavailable:
EXCEPTION_PROLOG
bne load_up_fpu /* if from user, just load it up */
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE_LITE(0x800, KernelFP)
+ EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
/* Decrementer */
EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
- EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
- EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
/* System call */
. = 0xc00
@@ -482,8 +482,8 @@ SystemCall:
EXC_XFER_EE_LITE(0xc00, DoSyscall)
/* Single step - not used on 601 */
- EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
- EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+ EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
/*
* The Altivec unavailable trap is at 0x0f20. Foo.
@@ -502,7 +502,7 @@ SystemCall:
Trap_0f:
EXCEPTION_PROLOG
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0xf00, UnknownException)
+ EXC_XFER_EE(0xf00, unknown_exception)
/*
* Handle TLB miss for instruction on 603/603e.
@@ -702,44 +702,44 @@ DataStoreTLBMiss:
rfi
#ifndef CONFIG_ALTIVEC
-#define AltivecAssistException UnknownException
+#define altivec_assist_exception unknown_exception
#endif
- EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE)
+ EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
- EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
#ifdef CONFIG_POWER4
- EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE)
+ EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE)
EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
#else /* !CONFIG_POWER4 */
- EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE)
+ EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
- EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_POWER4 */
- EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
- EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE)
+ EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
.globl mol_trampoline
.set mol_trampoline, i0x2f00
@@ -751,7 +751,7 @@ AltiVecUnavailable:
#ifdef CONFIG_ALTIVEC
bne load_up_altivec /* if from user, just load it up */
#endif /* CONFIG_ALTIVEC */
- EXC_XFER_EE_LITE(0xf20, AltivecUnavailException)
+ EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
#ifdef CONFIG_PPC64BRIDGE
DataAccess:
@@ -767,12 +767,12 @@ DataSegment:
addi r3,r1,STACK_FRAME_OVERHEAD
mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
- EXC_XFER_STD(0x380, UnknownException)
+ EXC_XFER_STD(0x380, unknown_exception)
InstructionSegment:
EXCEPTION_PROLOG
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x480, UnknownException)
+ EXC_XFER_STD(0x480, unknown_exception)
#endif /* CONFIG_PPC64BRIDGE */
#ifdef CONFIG_ALTIVEC
@@ -804,7 +804,7 @@ load_up_altivec:
beq 1f
add r4,r4,r6
addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
- SAVE_32VR(0,r10,r4)
+ SAVE_32VRS(0,r10,r4)
mfvscr vr0
li r10,THREAD_VSCR
stvx vr0,r10,r4
@@ -824,7 +824,7 @@ load_up_altivec:
stw r4,THREAD_USED_VR(r5)
lvx vr0,r10,r5
mtvscr vr0
- REST_32VR(0,r10,r5)
+ REST_32VRS(0,r10,r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
sub r4,r4,r6
@@ -870,7 +870,7 @@ giveup_altivec:
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpwi 0,r5,0
- SAVE_32VR(0, r4, r3)
+ SAVE_32VRS(0, r4, r3)
mfvscr vr0
li r4,THREAD_VSCR
stvx vr0,r4,r3
@@ -916,7 +916,7 @@ relocate_kernel:
copy_and_flush:
addi r5,r5,-4
addi r6,r6,-4
-4: li r0,L1_CACHE_LINE_SIZE/4
+4: li r0,L1_CACHE_BYTES/4
mtctr r0
3: addi r6,r6,4 /* copy a cache line */
lwzx r0,r6,r4
@@ -1059,7 +1059,6 @@ __secondary_start:
lis r3,-KERNELBASE@h
mr r4,r24
- bl identify_cpu
bl call_setup_cpu /* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
lis r3,-KERNELBASE@h
@@ -1109,11 +1108,6 @@ __secondary_start:
* Those generic dummy functions are kept for CPUs not
* included in CONFIG_6xx
*/
-_GLOBAL(__setup_cpu_power3)
- blr
-_GLOBAL(__setup_cpu_generic)
- blr
-
#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
_GLOBAL(__save_cpu_setup)
blr
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 599245b0407e..8b49679fad54 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -309,13 +309,13 @@ skpinv: addi r4,r4,1 /* Increment */
interrupt_base:
/* Critical Input Interrupt */
- CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
+ CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
#ifdef CONFIG_440A
- MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+ MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#else
- CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+ CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif
/* Data Storage Interrupt */
@@ -442,7 +442,7 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
- EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
/* System Call Interrupt */
@@ -451,21 +451,21 @@ interrupt_base:
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxillary Processor Unavailable Interrupt */
- EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException)
+ CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif
/* Data TLB Error Interrupt */
@@ -743,14 +743,18 @@ _GLOBAL(set_context)
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
-_GLOBAL(sdata)
-_GLOBAL(empty_zero_page)
+ .align 12
+ .globl sdata
+sdata:
+ .globl empty_zero_page
+empty_zero_page:
.space 4096
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
*/
-_GLOBAL(swapper_pg_dir)
+ .globl swapper_pg_dir
+swapper_pg_dir:
.space 8192
/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -759,13 +763,15 @@ _GLOBAL(swapper_pg_dir)
.align 12
exception_stack_bottom:
.space BOOKE_EXCEPTION_STACK_SIZE
-_GLOBAL(exception_stack_top)
+ .globl exception_stack_top
+exception_stack_top:
/*
* This space gets a copy of optional info passed to us by the bootstrap
* which is used to pass parameters into the kernel like root=/dev/sda1, etc.
*/
-_GLOBAL(cmd_line)
+ .globl cmd_line
+cmd_line:
.space 512
/*
@@ -774,5 +780,3 @@ _GLOBAL(cmd_line)
*/
abatron_pteptrs:
.space 8
-
-
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
index 8562b807b37c..10c261c67021 100644
--- a/arch/ppc/kernel/head_4xx.S
+++ b/arch/ppc/kernel/head_4xx.S
@@ -245,12 +245,12 @@ label:
/*
* 0x0100 - Critical Interrupt Exception
*/
- CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException)
+ CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
/*
* 0x0200 - Machine Check Exception
*/
- CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+ CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
/*
* 0x0300 - Data Storage Exception
@@ -405,7 +405,7 @@ label:
mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
stw r4,_DEAR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0x600, AlignmentException)
+ EXC_XFER_EE(0x600, alignment_exception)
/* 0x0700 - Program Exception */
START_EXCEPTION(0x0700, ProgramCheck)
@@ -413,21 +413,21 @@ label:
mfspr r4,SPRN_ESR /* Grab the ESR and save it */
stw r4,_ESR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x700, ProgramCheckException)
+ EXC_XFER_STD(0x700, program_check_exception)
- EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
/* 0x0C00 - System Call Exception */
START_EXCEPTION(0x0C00, SystemCall)
NORMAL_EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0xc00, DoSyscall)
- EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
/* 0x1000 - Programmable Interval Timer (PIT) Exception */
START_EXCEPTION(0x1000, Decrementer)
@@ -444,14 +444,14 @@ label:
/* 0x1010 - Fixed Interval Timer (FIT) Exception
*/
- STND_EXCEPTION(0x1010, FITException, UnknownException)
+ STND_EXCEPTION(0x1010, FITException, unknown_exception)
/* 0x1020 - Watchdog Timer (WDT) Exception
*/
#ifdef CONFIG_BOOKE_WDT
CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException)
+ CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
#endif
#endif
@@ -656,25 +656,25 @@ label:
mfspr r10, SPRN_SPRG0
b InstructionAccess
- EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
#ifdef CONFIG_IBM405_ERR51
/* 405GP errata 51 */
START_EXCEPTION(0x1700, Trap_17)
b DTLBMiss
#else
- EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
#endif
- EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
@@ -988,10 +988,14 @@ _GLOBAL(set_context)
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
-_GLOBAL(sdata)
-_GLOBAL(empty_zero_page)
+ .align 12
+ .globl sdata
+sdata:
+ .globl empty_zero_page
+empty_zero_page:
.space 4096
-_GLOBAL(swapper_pg_dir)
+ .globl swapper_pg_dir
+swapper_pg_dir:
.space 4096
@@ -1001,12 +1005,14 @@ _GLOBAL(swapper_pg_dir)
exception_stack_bottom:
.space 4096
critical_stack_top:
-_GLOBAL(exception_stack_top)
+ .globl exception_stack_top
+exception_stack_top:
/* This space gets a copy of optional info passed to us by the bootstrap
* which is used to pass parameters into the kernel like root=/dev/sda1, etc.
*/
-_GLOBAL(cmd_line)
+ .globl cmd_line
+cmd_line:
.space 512
/* Room for two PTE pointers, usually the kernel and current user pointers
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index cb1a3a54a026..de0978742221 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -203,7 +203,7 @@ i##n: \
ret_from_except)
/* System reset */
- EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
+ EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
/* Machine check */
. = 0x200
@@ -214,7 +214,7 @@ MachineCheck:
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x200, MachineCheckException)
+ EXC_XFER_STD(0x200, machine_check_exception)
/* Data access exception.
* This is "never generated" by the MPC8xx. We jump to it for other
@@ -252,20 +252,20 @@ Alignment:
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0x600, AlignmentException)
+ EXC_XFER_EE(0x600, alignment_exception)
/* Program check exception */
- EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)
+ EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
/* No FPU on MPC8xx. This exception is not supposed to happen.
*/
- EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD)
+ EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
/* Decrementer */
EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
- EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
- EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
/* System call */
. = 0xc00
@@ -274,9 +274,9 @@ SystemCall:
EXC_XFER_EE_LITE(0xc00, DoSyscall)
/* Single step - not used on 601 */
- EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
- EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)
- EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+ EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
@@ -540,22 +540,22 @@ DataTLBError:
#endif
b DataAccess
- EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
/* On the MPC8xx, these next four traps are used for development
* support of breakpoints and such. Someday I will get around to
* using them.
*/
- EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
- EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
. = 0x2000
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index 9342acf12e72..aeb349b47af3 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -335,7 +335,7 @@ label:
mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
stw r4,_DEAR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_EE(0x0600, AlignmentException)
+ EXC_XFER_EE(0x0600, alignment_exception)
#define PROGRAM_EXCEPTION \
START_EXCEPTION(Program) \
@@ -343,7 +343,7 @@ label:
mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
stw r4,_ESR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_STD(0x0700, ProgramCheckException)
+ EXC_XFER_STD(0x0700, program_check_exception)
#define DECREMENTER_EXCEPTION \
START_EXCEPTION(Decrementer) \
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index 8e52e8408316..5063c603fad4 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -426,14 +426,14 @@ skpinv: addi r6,r6,1 /* Increment */
interrupt_base:
/* Critical Input Interrupt */
- CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)
+ CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
#ifdef CONFIG_E200
/* no RFMCI, MCSRRs on E200 */
- CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+ CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#else
- MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
+ MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif
/* Data Storage Interrupt */
@@ -542,9 +542,9 @@ interrupt_base:
#else
#ifdef CONFIG_E200
/* E200 treats 'normal' floating point instructions as FP Unavail exception */
- EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE)
+ EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
#else
- EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
#endif
@@ -554,20 +554,20 @@ interrupt_base:
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxillary Processor Unavailable Interrupt */
- EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException)
+ CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
#endif
/* Data TLB Error Interrupt */
@@ -696,21 +696,21 @@ interrupt_base:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
- EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
#else
- EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* SPE Floating Point Round */
- EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE)
+ EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
/* Performance Monitor */
- EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD)
+ EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
/* Debug Interrupt */
@@ -853,7 +853,7 @@ load_up_spe:
cmpi 0,r4,0
beq 1f
addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
- SAVE_32EVR(0,r10,r4)
+ SAVE_32EVRS(0,r10,r4)
evxor evr10, evr10, evr10 /* clear out evr10 */
evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
li r5,THREAD_ACC
@@ -873,7 +873,7 @@ load_up_spe:
stw r4,THREAD_USED_SPE(r5)
evlddx evr4,r10,r5
evmra evr4,evr4
- REST_32EVR(0,r10,r5)
+ REST_32EVRS(0,r10,r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
@@ -963,7 +963,7 @@ _GLOBAL(giveup_spe)
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
- SAVE_32EVR(0, r4, r3)
+ SAVE_32EVRS(0, r4, r3)
evxor evr6, evr6, evr6 /* clear out evr6 */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
@@ -1028,10 +1028,14 @@ _GLOBAL(set_context)
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
-_GLOBAL(sdata)
-_GLOBAL(empty_zero_page)
+ .align 12
+ .globl sdata
+sdata:
+ .globl empty_zero_page
+empty_zero_page:
.space 4096
-_GLOBAL(swapper_pg_dir)
+ .globl swapper_pg_dir
+swapper_pg_dir:
.space 4096
/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -1040,13 +1044,15 @@ _GLOBAL(swapper_pg_dir)
.align 12
exception_stack_bottom:
.space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
-_GLOBAL(exception_stack_top)
+ .globl exception_stack_top
+exception_stack_top:
/*
* This space gets a copy of optional info passed to us by the bootstrap
* which is used to pass parameters into the kernel like root=/dev/sda1, etc.
*/
-_GLOBAL(cmd_line)
+ .globl cmd_line
+cmd_line:
.space 512
/*
@@ -1055,4 +1061,3 @@ _GLOBAL(cmd_line)
*/
abatron_pteptrs:
.space 8
-
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index fba29c876b62..11e5b44713f7 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -32,6 +32,7 @@
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/machdep.h>
+#include <asm/smp.h>
void default_idle(void)
{
@@ -74,7 +75,7 @@ void cpu_idle(void)
/*
* Register the sysctl to set/clear powersave_nap.
*/
-extern unsigned long powersave_nap;
+extern int powersave_nap;
static ctl_table powersave_nap_ctl_table[]={
{
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 8843f3af230f..fbb2b9f8922c 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -57,6 +57,7 @@
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
+#include <asm/machdep.h>
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
@@ -125,7 +126,7 @@ skip:
seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
}
#endif
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
/* should this be per processor send/receive? */
seq_printf(p, "IPI (recv/sent): %10u/%u\n",
atomic_read(&ipi_recv), atomic_read(&ipi_sent));
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
index 861115249b35..d7f4e982b539 100644
--- a/arch/ppc/kernel/l2cr.S
+++ b/arch/ppc/kernel/l2cr.S
@@ -203,7 +203,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
* L1 icache
*/
b 20f
- .balign L1_CACHE_LINE_SIZE
+ .balign L1_CACHE_BYTES
22:
sync
mtspr SPRN_L2CR,r3
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c
index a72787747df7..a882b0dbe8de 100644
--- a/arch/ppc/kernel/machine_kexec.c
+++ b/arch/ppc/kernel/machine_kexec.c
@@ -32,7 +32,7 @@ const extern unsigned int relocate_new_kernel_size;
* Provide a dummy crash_notes definition while crash dump arrives to ppc.
* This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
*/
-void *crash_notes = NULL;
+note_buf_t crash_notes[NR_CPUS];
void machine_shutdown(void)
{
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 90d917d2e856..3056ede2424d 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -125,9 +125,8 @@ _GLOBAL(identify_cpu)
1:
addis r6,r3,cur_cpu_spec@ha
addi r6,r6,cur_cpu_spec@l
- slwi r4,r4,2
sub r8,r8,r3
- stwx r8,r4,r6
+ stw r8,0(r6)
blr
/*
@@ -186,19 +185,18 @@ _GLOBAL(do_cpu_ftr_fixups)
*
* Setup function is called with:
* r3 = data offset
- * r4 = CPU number
- * r5 = ptr to CPU spec (relocated)
+ * r4 = ptr to CPU spec (relocated)
*/
_GLOBAL(call_setup_cpu)
- addis r5,r3,cur_cpu_spec@ha
- addi r5,r5,cur_cpu_spec@l
- slwi r4,r24,2
- lwzx r5,r4,r5
+ addis r4,r3,cur_cpu_spec@ha
+ addi r4,r4,cur_cpu_spec@l
+ lwz r4,0(r4)
+ add r4,r4,r3
+ lwz r5,CPU_SPEC_SETUP(r4)
+ cmpi 0,r5,0
add r5,r5,r3
- lwz r6,CPU_SPEC_SETUP(r5)
- add r6,r6,r3
- mtctr r6
- mr r4,r24
+ beqlr
+ mtctr r5
bctr
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
@@ -273,134 +271,6 @@ _GLOBAL(low_choose_7447a_dfs)
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
-/* void local_save_flags_ptr(unsigned long *flags) */
-_GLOBAL(local_save_flags_ptr)
- mfmsr r4
- stw r4,0(r3)
- blr
- /*
- * Need these nops here for taking over save/restore to
- * handle lost intrs
- * -- Cort
- */
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-_GLOBAL(local_save_flags_ptr_end)
-
-/* void local_irq_restore(unsigned long flags) */
-_GLOBAL(local_irq_restore)
-/*
- * Just set/clear the MSR_EE bit through restore/flags but do not
- * change anything else. This is needed by the RT system and makes
- * sense anyway.
- * -- Cort
- */
- mfmsr r4
- /* Copy all except the MSR_EE bit from r4 (current MSR value)
- to r3. This is the sort of thing the rlwimi instruction is
- designed for. -- paulus. */
- rlwimi r3,r4,0,17,15
- /* Check if things are setup the way we want _already_. */
- cmpw 0,r3,r4
- beqlr
-1: SYNC
- mtmsr r3
- SYNC
- blr
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-_GLOBAL(local_irq_restore_end)
-
-_GLOBAL(local_irq_disable)
- mfmsr r0 /* Get current interrupt state */
- rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
- rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- SYNC /* Some chip revs have problems here... */
- mtmsr r0 /* Update machine state */
- blr /* Done */
- /*
- * Need these nops here for taking over save/restore to
- * handle lost intrs
- * -- Cort
- */
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-_GLOBAL(local_irq_disable_end)
-
-_GLOBAL(local_irq_enable)
- mfmsr r3 /* Get current state */
- ori r3,r3,MSR_EE /* Turn on 'EE' bit */
- SYNC /* Some chip revs have problems here... */
- mtmsr r3 /* Update machine state */
- blr
- /*
- * Need these nops here for taking over save/restore to
- * handle lost intrs
- * -- Cort
- */
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-_GLOBAL(local_irq_enable_end)
-
/*
* complement mask on the msr then "or" some values on.
* _nmask_and_or_msr(nmask, value_to_or)
@@ -628,21 +498,21 @@ _GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
- li r5,L1_CACHE_LINE_SIZE-1
+ li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
add r4,r4,r5
- srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4
mr r6,r3
1: dcbst 0,r3
- addi r3,r3,L1_CACHE_LINE_SIZE
+ addi r3,r3,L1_CACHE_BYTES
bdnz 1b
sync /* wait for dcbst's to get to ram */
mtctr r4
2: icbi 0,r6
- addi r6,r6,L1_CACHE_LINE_SIZE
+ addi r6,r6,L1_CACHE_BYTES
bdnz 2b
sync /* additional sync needed on g4 */
isync
@@ -655,16 +525,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
* clean_dcache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(clean_dcache_range)
- li r5,L1_CACHE_LINE_SIZE-1
+ li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
add r4,r4,r5
- srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4
1: dcbst 0,r3
- addi r3,r3,L1_CACHE_LINE_SIZE
+ addi r3,r3,L1_CACHE_BYTES
bdnz 1b
sync /* wait for dcbst's to get to ram */
blr
@@ -676,16 +546,16 @@ _GLOBAL(clean_dcache_range)
* flush_dcache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(flush_dcache_range)
- li r5,L1_CACHE_LINE_SIZE-1
+ li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
add r4,r4,r5
- srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4
1: dcbf 0,r3
- addi r3,r3,L1_CACHE_LINE_SIZE
+ addi r3,r3,L1_CACHE_BYTES
bdnz 1b
sync /* wait for dcbst's to get to ram */
blr
@@ -698,16 +568,16 @@ _GLOBAL(flush_dcache_range)
* invalidate_dcache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(invalidate_dcache_range)
- li r5,L1_CACHE_LINE_SIZE-1
+ li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
add r4,r4,r5
- srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4
1: dcbi 0,r3
- addi r3,r3,L1_CACHE_LINE_SIZE
+ addi r3,r3,L1_CACHE_BYTES
bdnz 1b
sync /* wait for dcbi's to get to ram */
blr
@@ -728,7 +598,7 @@ _GLOBAL(flush_dcache_all)
mtctr r4
lis r5, KERNELBASE@h
1: lwz r3, 0(r5) /* Load one word from every line */
- addi r5, r5, L1_CACHE_LINE_SIZE
+ addi r5, r5, L1_CACHE_BYTES
bdnz 1b
blr
#endif /* CONFIG_NOT_COHERENT_CACHE */
@@ -746,16 +616,16 @@ BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
+ li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
- addi r3,r3,L1_CACHE_LINE_SIZE
+ addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
mtctr r4
1: icbi 0,r6
- addi r6,r6,L1_CACHE_LINE_SIZE
+ addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
@@ -778,16 +648,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
mtmsr r0
isync
rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
+ li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
- addi r3,r3,L1_CACHE_LINE_SIZE
+ addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
mtctr r4
1: icbi 0,r6
- addi r6,r6,L1_CACHE_LINE_SIZE
+ addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
mtmsr r10 /* restore DR */
@@ -802,7 +672,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
* void clear_pages(void *page, int order) ;
*/
_GLOBAL(clear_pages)
- li r0,4096/L1_CACHE_LINE_SIZE
+ li r0,4096/L1_CACHE_BYTES
slw r0,r0,r4
mtctr r0
#ifdef CONFIG_8xx
@@ -814,7 +684,7 @@ _GLOBAL(clear_pages)
#else
1: dcbz 0,r3
#endif
- addi r3,r3,L1_CACHE_LINE_SIZE
+ addi r3,r3,L1_CACHE_BYTES
bdnz 1b
blr
@@ -840,7 +710,7 @@ _GLOBAL(copy_page)
#ifdef CONFIG_8xx
/* don't use prefetch on 8xx */
- li r0,4096/L1_CACHE_LINE_SIZE
+ li r0,4096/L1_CACHE_BYTES
mtctr r0
1: COPY_16_BYTES
bdnz 1b
@@ -854,13 +724,13 @@ _GLOBAL(copy_page)
li r11,4
mtctr r0
11: dcbt r11,r4
- addi r11,r11,L1_CACHE_LINE_SIZE
+ addi r11,r11,L1_CACHE_BYTES
bdnz 11b
#else /* MAX_COPY_PREFETCH == 1 */
dcbt r5,r4
- li r11,L1_CACHE_LINE_SIZE+4
+ li r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
- li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
+ li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
crclr 4*cr0+eq
2:
mtctr r0
@@ -868,12 +738,12 @@ _GLOBAL(copy_page)
dcbt r11,r4
dcbz r5,r3
COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 32
+#if L1_CACHE_BYTES >= 32
COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 64
+#if L1_CACHE_BYTES >= 64
COPY_16_BYTES
COPY_16_BYTES
-#if L1_CACHE_LINE_SIZE >= 128
+#if L1_CACHE_BYTES >= 128
COPY_16_BYTES
COPY_16_BYTES
COPY_16_BYTES
@@ -1098,33 +968,6 @@ _GLOBAL(_get_SP)
blr
/*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- * We restore and save the fpscr so the task gets the same result
- * and exceptions as if the cpu had performed the load or store.
- */
-
-#ifdef CONFIG_PPC_FPU
-_GLOBAL(cvt_fd)
- lfd 0,-4(r5) /* load up fpscr value */
- mtfsf 0xff,0
- lfs 0,0(r3)
- stfd 0,0(r4)
- mffs 0 /* save new fpscr value */
- stfd 0,-4(r5)
- blr
-
-_GLOBAL(cvt_df)
- lfd 0,-4(r5) /* load up fpscr value */
- mtfsf 0xff,0
- lfd 0,0(r3)
- stfs 0,0(r4)
- mffs 0 /* save new fpscr value */
- stfd 0,-4(r5)
- blr
-#endif
-
-/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
*/
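
The cache-maintenance hunks above only rename L1_CACHE_LINE_SIZE/LG_L1_CACHE_LINE_SIZE to L1_CACHE_BYTES/L1_CACHE_SHIFT; the underlying arithmetic — align the start address down to a line boundary, then round the length up so the final partial line is still covered — is unchanged. A minimal C sketch of that line-count calculation (the shift value of 5, i.e. 32-byte lines, is only an illustrative assumption, not a statement about any particular CPU):

#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_SHIFT 5                        /* example: 32-byte lines */
#define L1_CACHE_BYTES (1u << L1_CACHE_SHIFT)

/* Mirrors the prologue of flush_icache_range() and friends:
 *   li r5,L1_CACHE_BYTES-1 ; andc r3,r3,r5 ; subf r4,r3,r4 ;
 *   add r4,r4,r5 ; srwi. r4,r4,L1_CACHE_SHIFT
 */
static unsigned long lines_to_flush(uintptr_t start, uintptr_t stop)
{
    uintptr_t mask    = L1_CACHE_BYTES - 1;
    uintptr_t aligned = start & ~mask;          /* align start down        */
    uintptr_t len     = (stop - aligned) + mask;/* round length up         */
    return (unsigned long)(len >> L1_CACHE_SHIFT);
}

int main(void)
{
    /* 0x1005..0x1043 straddles three 32-byte lines, so this prints 3 —
     * the number of dcbst/icbi iterations the loops above would run. */
    printf("%lu\n", lines_to_flush(0x1005, 0x1043));
    return 0;
}
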
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 854e45beb387..e8f4e576750a 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -21,6 +21,7 @@
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
+#include <asm/machdep.h>
#undef DEBUG
@@ -53,7 +54,7 @@ static u8* pci_to_OF_bus_map;
/* By default, we don't re-assign bus numbers. We do this only on
* some pmacs
*/
-int pci_assign_all_busses;
+int pci_assign_all_buses;
struct pci_controller* hose_head;
struct pci_controller** hose_tail = &hose_head;
@@ -644,7 +645,7 @@ pcibios_alloc_controller(void)
/*
* Functions below are used on OpenFirmware machines.
*/
-static void __openfirmware
+static void
make_one_node_map(struct device_node* node, u8 pci_bus)
{
int *bus_range;
@@ -678,7 +679,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
}
}
-void __openfirmware
+void
pcibios_make_OF_bus_map(void)
{
int i;
@@ -720,7 +721,7 @@ pcibios_make_OF_bus_map(void)
typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
-static struct device_node* __openfirmware
+static struct device_node*
scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
{
struct device_node* sub_node;
@@ -761,7 +762,7 @@ scan_OF_pci_childs_iterator(struct device_node* node, void* data)
return 0;
}
-static struct device_node* __openfirmware
+static struct device_node*
scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
{
u8 filter_data[2] = {bus, dev_fn};
@@ -813,18 +814,20 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
/* Now, lookup childs of the hose */
return scan_OF_childs_for_device(node->child, busnr, devfn);
}
+EXPORT_SYMBOL(pci_busdev_to_OF_node);
struct device_node*
pci_device_to_OF_node(struct pci_dev *dev)
{
return pci_busdev_to_OF_node(dev->bus, dev->devfn);
}
+EXPORT_SYMBOL(pci_device_to_OF_node);
/* This routine is meant to be used early during boot, when the
* PCI bus numbers have not yet been assigned, and you need to
* issue PCI config cycles to an OF device.
* It could also be used to "fix" RTAS config cycles if you want
- * to set pci_assign_all_busses to 1 and still use RTAS for PCI
+ * to set pci_assign_all_buses to 1 and still use RTAS for PCI
* config cycles.
*/
struct pci_controller*
@@ -842,7 +845,7 @@ pci_find_hose_for_OF_device(struct device_node* node)
return NULL;
}
-static int __openfirmware
+static int
find_OF_pci_device_filter(struct device_node* node, void* data)
{
return ((void *)node == data);
@@ -890,6 +893,7 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
}
return -ENODEV;
}
+EXPORT_SYMBOL(pci_device_from_OF_node);
void __init
pci_process_bridge_OF_ranges(struct pci_controller *hose,
@@ -1030,6 +1034,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
+#else /* CONFIG_PPC_OF */
+void pcibios_make_OF_bus_map(void)
+{
+}
#endif /* CONFIG_PPC_OF */
/* Add sysfs properties */
@@ -1262,12 +1270,12 @@ pcibios_init(void)
/* Scan all of the recorded PCI controllers. */
for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
- if (pci_assign_all_busses)
+ if (pci_assign_all_buses)
hose->first_busno = next_busno;
hose->last_busno = 0xff;
bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
hose->last_busno = bus->subordinate;
- if (pci_assign_all_busses || next_busno <= hose->last_busno)
+ if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
}
pci_bus_count = next_busno;
@@ -1276,7 +1284,7 @@ pcibios_init(void)
* numbers vs. kernel bus numbers since we may have to
* remap them.
*/
- if (pci_assign_all_busses && have_of)
+ if (pci_assign_all_buses && have_of)
pcibios_make_OF_bus_map();
/* Do machine dependent PCI interrupt routing */
@@ -1586,16 +1594,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
* above routine
*/
pgprot_t pci_phys_mem_access_prot(struct file *file,
- unsigned long offset,
+ unsigned long pfn,
unsigned long size,
pgprot_t protection)
{
struct pci_dev *pdev = NULL;
struct resource *found = NULL;
unsigned long prot = pgprot_val(protection);
+ unsigned long offset = pfn << PAGE_SHIFT;
int i;
- if (page_is_ram(offset >> PAGE_SHIFT))
+ if (page_is_ram(pfn))
return prot;
prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
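
The pci.c change above switches pci_phys_mem_access_prot() from a byte offset to a page frame number, so page_is_ram() now takes the pfn directly and the byte offset is recomputed only where it is still needed. The conversion is a plain shift by PAGE_SHIFT; a tiny illustration, assuming 4 KiB pages purely for the example:

#include <stdio.h>

#define PAGE_SHIFT 12                       /* 4 KiB pages assumed here */

int main(void)
{
    unsigned long pfn    = 0x80f00;             /* example frame number */
    unsigned long offset = pfn << PAGE_SHIFT;   /* byte address         */
    unsigned long back   = offset >> PAGE_SHIFT;/* back to the pfn      */

    printf("pfn=0x%lx offset=0x%lx round-trip=0x%lx\n", pfn, offset, back);
    return 0;
}
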
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c
deleted file mode 100644
index 22df9a596a0f..000000000000
--- a/arch/ppc/kernel/perfmon.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/* kernel/perfmon.c
- * PPC 32 Performance Monitor Infrastructure
- *
- * Author: Andy Fleming
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/prctl.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/xmon.h>
-
-/* A lock to regulate grabbing the interrupt */
-DEFINE_SPINLOCK(perfmon_lock);
-
-#if defined (CONFIG_FSL_BOOKE) && !defined (CONFIG_E200)
-static void dummy_perf(struct pt_regs *regs)
-{
- unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 &= ~PMGC0_PMIE;
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-#elif defined(CONFIG_6xx)
-/* Ensure exceptions are disabled */
-static void dummy_perf(struct pt_regs *regs)
-{
- unsigned int mmcr0 = mfspr(SPRN_MMCR0);
-
- mmcr0 &= ~MMCR0_PMXE;
- mtspr(SPRN_MMCR0, mmcr0);
-}
-#else
-static void dummy_perf(struct pt_regs *regs)
-{
-}
-#endif
-
-void (*perf_irq)(struct pt_regs *) = dummy_perf;
-
-/* Grab the interrupt, if it's free.
- * Returns 0 on success, -1 if the interrupt is taken already */
-int request_perfmon_irq(void (*handler)(struct pt_regs *))
-{
- int err = 0;
-
- spin_lock(&perfmon_lock);
-
- if (perf_irq == dummy_perf)
- perf_irq = handler;
- else {
- pr_info("perfmon irq already handled by %p\n", perf_irq);
- err = -1;
- }
-
- spin_unlock(&perfmon_lock);
-
- return err;
-}
-
-void free_perfmon_irq(void)
-{
- spin_lock(&perfmon_lock);
-
- perf_irq = dummy_perf;
-
- spin_unlock(&perfmon_lock);
-}
-
-EXPORT_SYMBOL(perf_irq);
-EXPORT_SYMBOL(request_perfmon_irq);
-EXPORT_SYMBOL(free_perfmon_irq);
diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c
index 03526bfb0840..32455dfcc36b 100644
--- a/arch/ppc/kernel/perfmon_fsl_booke.c
+++ b/arch/ppc/kernel/perfmon_fsl_booke.c
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/reg.h>
#include <asm/xmon.h>
-#include <asm/perfmon.h>
+#include <asm/pmc.h>
static inline u32 get_pmlca(int ctr);
static inline void set_pmlca(int ctr, u32 pmlca);
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 88f6bb7b6964..e0ca61b37f4f 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -53,10 +53,10 @@
extern void transfer_to_handler(void);
extern void do_IRQ(struct pt_regs *regs);
-extern void MachineCheckException(struct pt_regs *regs);
-extern void AlignmentException(struct pt_regs *regs);
-extern void ProgramCheckException(struct pt_regs *regs);
-extern void SingleStepException(struct pt_regs *regs);
+extern void machine_check_exception(struct pt_regs *regs);
+extern void alignment_exception(struct pt_regs *regs);
+extern void program_check_exception(struct pt_regs *regs);
+extern void single_step_exception(struct pt_regs *regs);
extern int do_signal(sigset_t *, struct pt_regs *);
extern int pmac_newworld;
extern int sys_sigreturn(struct pt_regs *regs);
@@ -72,10 +72,10 @@ EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(do_signal);
EXPORT_SYMBOL(transfer_to_handler);
EXPORT_SYMBOL(do_IRQ);
-EXPORT_SYMBOL(MachineCheckException);
-EXPORT_SYMBOL(AlignmentException);
-EXPORT_SYMBOL(ProgramCheckException);
-EXPORT_SYMBOL(SingleStepException);
+EXPORT_SYMBOL(machine_check_exception);
+EXPORT_SYMBOL(alignment_exception);
+EXPORT_SYMBOL(program_check_exception);
+EXPORT_SYMBOL(single_step_exception);
EXPORT_SYMBOL(sys_sigreturn);
EXPORT_SYMBOL(ppc_n_lost_interrupts);
EXPORT_SYMBOL(ppc_lost_interrupts);
@@ -131,6 +131,11 @@ EXPORT_SYMBOL(outw);
EXPORT_SYMBOL(outl);
EXPORT_SYMBOL(outsl);*/
+EXPORT_SYMBOL(__ide_mm_insl);
+EXPORT_SYMBOL(__ide_mm_outsw);
+EXPORT_SYMBOL(__ide_mm_insw);
+EXPORT_SYMBOL(__ide_mm_outsl);
+
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);
@@ -230,9 +235,6 @@ EXPORT_SYMBOL(find_all_nodes);
EXPORT_SYMBOL(get_property);
EXPORT_SYMBOL(request_OF_resource);
EXPORT_SYMBOL(release_OF_resource);
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-EXPORT_SYMBOL(pci_device_to_OF_node);
-EXPORT_SYMBOL(pci_device_from_OF_node);
EXPORT_SYMBOL(of_find_node_by_name);
EXPORT_SYMBOL(of_find_node_by_type);
EXPORT_SYMBOL(of_find_compatible_node);
@@ -272,16 +274,6 @@ EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(__delay);
-#ifndef INLINE_IRQS
-EXPORT_SYMBOL(local_irq_enable);
-EXPORT_SYMBOL(local_irq_enable_end);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_disable_end);
-EXPORT_SYMBOL(local_save_flags_ptr);
-EXPORT_SYMBOL(local_save_flags_ptr_end);
-EXPORT_SYMBOL(local_irq_restore);
-EXPORT_SYMBOL(local_irq_restore_end);
-#endif
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -335,11 +327,6 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU */
-EXPORT_SYMBOL(cur_cpu_spec);
-#ifdef CONFIG_PPC_PMAC
-extern unsigned long agp_special_page;
-EXPORT_SYMBOL(agp_special_page);
-#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 82de66e4db6d..cb1c7b92f8c6 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
}
#endif /* defined(CHECK_STACK) */
-#ifdef CONFIG_ALTIVEC
-int
-dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the floating-point register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
{
- if (regs->msr & MSR_VEC)
- giveup_altivec(current);
- memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+ if (tsk->thread.regs) {
+ /*
+ * We need to disable preemption here because if we didn't,
+ * another process could get scheduled after the regs->msr
+ * test but before we have finished saving the FP registers
+ * to the thread_struct. That process could take over the
+ * FPU, and then when we get scheduled again we would store
+ * bogus values for the remaining FP registers.
+ */
+ preempt_disable();
+ if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+ /*
+ * This should only ever be called for current or
+ * for a stopped child process. Since we save away
+ * the FP register state on context switch on SMP,
+ * there is something wrong if a stopped child appears
+ * to still have its FP state in the CPU registers.
+ */
+ BUG_ON(tsk != current);
+#endif
+ giveup_fpu(current);
+ }
+ preempt_enable();
+ }
+}
+
+void enable_kernel_fp(void)
+{
+ WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+ if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+ giveup_fpu(current);
+ else
+ giveup_fpu(NULL); /* just enables FP for kernel */
+#else
+ giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+ preempt_disable();
+ if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
+ giveup_fpu(tsk);
+ preempt_enable();
+ memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
return 1;
}
-void
-enable_kernel_altivec(void)
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
{
WARN_ON(preemptible());
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
#endif /* __SMP __ */
}
EXPORT_SYMBOL(enable_kernel_altivec);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-int
-dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+/*
+ * Make sure the VMX/Altivec register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
{
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
- /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
- memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+ if (tsk->thread.regs) {
+ preempt_disable();
+ if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+ BUG_ON(tsk != current);
+#endif
+ giveup_altivec(current);
+ }
+ preempt_enable();
+ }
+}
+
+int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
return 1;
}
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
void
enable_kernel_spe(void)
{
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
#endif /* __SMP __ */
}
EXPORT_SYMBOL(enable_kernel_spe);
-#endif /* CONFIG_SPE */
-void
-enable_kernel_fp(void)
+void flush_spe_to_thread(struct task_struct *tsk)
{
- WARN_ON(preemptible());
-
+ if (tsk->thread.regs) {
+ preempt_disable();
+ if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
- giveup_fpu(current);
- else
- giveup_fpu(NULL); /* just enables FP for kernel */
-#else
- giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
+ BUG_ON(tsk != current);
+#endif
+ giveup_spe(current);
+ }
+ preempt_enable();
+ }
}
-EXPORT_SYMBOL(enable_kernel_fp);
-int
-dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
- preempt_disable();
- if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
- giveup_fpu(tsk);
- preempt_enable();
- memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+ /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+ memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
return 1;
}
+#endif /* CONFIG_SPE */
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
@@ -287,11 +347,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */
+#ifdef CONFIG_ALTIVEC
/* Avoid the trap. On smp this this never happens since
* we don't set last_task_used_altivec -- Cort
*/
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
+#endif
#ifdef CONFIG_SPE
/* Avoid the trap. On smp this this never happens since
* we don't set last_task_used_spe
@@ -482,7 +544,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
last_task_used_spe = NULL;
#endif
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
- current->thread.fpscr = 0;
+ current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
memset(current->thread.vr, 0, sizeof(current->thread.vr));
memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
@@ -557,14 +619,16 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
-int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
+int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
-int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
+int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5, unsigned long p6,
struct pt_regs *regs)
{
CHECK_FULL_REGS(regs);
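
The process.c hunks above consolidate the lazy FP/AltiVec/SPE handling into flush_*_to_thread() helpers: the saved copy in the thread_struct is only refreshed when the hardware still holds the live state (the corresponding MSR bit is set), with preemption disabled across the check and the giveup. A user-space model of that ownership pattern — MSR_FP, giveup_fpu() and the preempt calls are stand-ins for the real kernel primitives, and the locking is deliberately omitted:

#include <assert.h>
#include <stdio.h>

#define MSR_FP 0x2000

struct task {
    unsigned long msr;      /* does this task currently own the FP unit? */
    double fpr[32];         /* saved copy in the "thread_struct"         */
};

static double fp_unit[32];  /* the single hardware register file         */

static void giveup_fpu(struct task *tsk)
{
    for (int i = 0; i < 32; i++)    /* write live registers back         */
        tsk->fpr[i] = fp_unit[i];
    tsk->msr &= ~MSR_FP;            /* hardware state no longer live     */
}

static void flush_fp_to_thread(struct task *tsk)
{
    /* preempt_disable() would bracket this in the kernel */
    if (tsk->msr & MSR_FP)
        giveup_fpu(tsk);
    /* preempt_enable() */
}

int main(void)
{
    struct task t = { .msr = MSR_FP };
    fp_unit[0] = 3.5;               /* value only present in "hardware"  */
    flush_fp_to_thread(&t);
    assert(t.fpr[0] == 3.5);        /* saved copy is now current         */
    printf("fpr[0]=%g\n", t.fpr[0]);
    return 0;
}
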
diff --git a/arch/ppc/kernel/ptrace.c b/arch/ppc/kernel/ptrace.c
deleted file mode 100644
index e7aee4108dea..000000000000
--- a/arch/ppc/kernel/ptrace.c
+++ /dev/null
@@ -1,507 +0,0 @@
-/*
- * arch/ppc/kernel/ptrace.c
- *
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/m68k/kernel/ptrace.c"
- * Copyright (C) 1994 by Hamish Macdonald
- * Taken from linux/kernel/ptrace.c and modified for M680x0.
- * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@hq.fsmlabs.com)
- * and Paul Mackerras (paulus@linuxcare.com.au).
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/signal.h>
-#include <linux/seccomp.h>
-#include <linux/audit.h>
-#include <linux/module.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
-#define MSR_DEBUGCHANGE 0
-#else
-#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
-#endif
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
-{
- if (regno < sizeof(struct pt_regs) / sizeof(unsigned long)
- && task->thread.regs != NULL)
- return ((unsigned long *)task->thread.regs)[regno];
- return (0);
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task, int regno,
- unsigned long data)
-{
- if (regno <= PT_MQ && task->thread.regs != NULL) {
- if (regno == PT_MSR)
- data = (data & MSR_DEBUGCHANGE)
- | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
- ((unsigned long *)task->thread.regs)[regno] = data;
- return 0;
- }
- return -EIO;
-}
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get contents of AltiVec register state in task TASK
- */
-static inline int get_vrregs(unsigned long __user *data, struct task_struct *task)
-{
- int i, j;
-
- if (!access_ok(VERIFY_WRITE, data, 133 * sizeof(unsigned long)))
- return -EFAULT;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- for (i = 0; i < 32; i++)
- for (j = 0; j < 4; j++, data++)
- if (__put_user(task->thread.vr[i].u[j], data))
- return -EFAULT;
-
- /* copy VSCR */
- for (i = 0; i < 4; i++, data++)
- if (__put_user(task->thread.vscr.u[i], data))
- return -EFAULT;
-
- /* copy VRSAVE */
- if (__put_user(task->thread.vrsave, data))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static inline int set_vrregs(struct task_struct *task, unsigned long __user *data)
-{
- int i, j;
-
- if (!access_ok(VERIFY_READ, data, 133 * sizeof(unsigned long)))
- return -EFAULT;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- for (i = 0; i < 32; i++)
- for (j = 0; j < 4; j++, data++)
- if (__get_user(task->thread.vr[i].u[j], data))
- return -EFAULT;
-
- /* copy VSCR */
- for (i = 0; i < 4; i++, data++)
- if (__get_user(task->thread.vscr.u[i], data))
- return -EFAULT;
-
- /* copy VRSAVE */
- if (__get_user(task->thread.vrsave, data))
- return -EFAULT;
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_SPE
-
-/*
- * For get_evrregs/set_evrregs functions 'data' has the following layout:
- *
- * struct {
- * u32 evr[32];
- * u64 acc;
- * u32 spefscr;
- * }
- */
-
-/*
- * Get contents of SPE register state in task TASK.
- */
-static inline int get_evrregs(unsigned long *data, struct task_struct *task)
-{
- int i;
-
- if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long)))
- return -EFAULT;
-
- /* copy SPEFSCR */
- if (__put_user(task->thread.spefscr, &data[34]))
- return -EFAULT;
-
- /* copy SPE registers EVR[0] .. EVR[31] */
- for (i = 0; i < 32; i++, data++)
- if (__put_user(task->thread.evr[i], data))
- return -EFAULT;
-
- /* copy ACC */
- if (__put_user64(task->thread.acc, (unsigned long long *)data))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Write contents of SPE register state into task TASK.
- */
-static inline int set_evrregs(struct task_struct *task, unsigned long *data)
-{
- int i;
-
- if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long)))
- return -EFAULT;
-
- /* copy SPEFSCR */
- if (__get_user(task->thread.spefscr, &data[34]))
- return -EFAULT;
-
- /* copy SPE registers EVR[0] .. EVR[31] */
- for (i = 0; i < 32; i++, data++)
- if (__get_user(task->thread.evr[i], data))
- return -EFAULT;
- /* copy ACC */
- if (__get_user64(task->thread.acc, (unsigned long long*)data))
- return -EFAULT;
-
- return 0;
-}
-#endif /* CONFIG_SPE */
-
-static inline void
-set_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
-#else
- regs->msr |= MSR_SE;
-#endif
- }
-}
-
-static inline void
-clear_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- task->thread.dbcr0 = 0;
- regs->msr &= ~MSR_DE;
-#else
- regs->msr &= ~MSR_SE;
-#endif
- }
-}
-
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
- /* make sure the single step bit is not set. */
- clear_single_step(child);
-}
-
-int sys_ptrace(long request, long pid, long addr, long data)
-{
- struct task_struct *child;
- int ret = -EPERM;
-
- lock_kernel();
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
- switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long __user *) data);
- break;
- }
-
- /* read the word at location addr in the USER area. */
- /* XXX this will need fixing for 64-bit */
- case PTRACE_PEEKUSR: {
- unsigned long index, tmp;
-
- ret = -EIO;
- /* convert to index and check */
- index = (unsigned long) addr >> 2;
- if ((addr & 3) || index > PT_FPSCR
- || child->thread.regs == NULL)
- break;
-
- CHECK_FULL_REGS(child->thread.regs);
- if (index < PT_FPR0) {
- tmp = get_reg(child, (int) index);
- } else {
- preempt_disable();
- if (child->thread.regs->msr & MSR_FP)
- giveup_fpu(child);
- preempt_enable();
- tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
- }
- ret = put_user(tmp,(unsigned long __user *) data);
- break;
- }
-
- /* If I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
- break;
-
- /* write the word at location addr in the USER area */
- case PTRACE_POKEUSR: {
- unsigned long index;
-
- ret = -EIO;
- /* convert to index and check */
- index = (unsigned long) addr >> 2;
- if ((addr & 3) || index > PT_FPSCR
- || child->thread.regs == NULL)
- break;
-
- CHECK_FULL_REGS(child->thread.regs);
- if (index == PT_ORIG_R3)
- break;
- if (index < PT_FPR0) {
- ret = put_reg(child, index, data);
- } else {
- preempt_disable();
- if (child->thread.regs->msr & MSR_FP)
- giveup_fpu(child);
- preempt_enable();
- ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
- ret = 0;
- }
- break;
- }
-
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL) {
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- } else {
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- child->exit_code = data;
- /* make sure the single step bit is not set. */
- clear_single_step(child);
- wake_up_process(child);
- ret = 0;
- break;
- }
-
-/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- /* make sure the single step bit is not set. */
- clear_single_step(child);
- wake_up_process(child);
- break;
- }
-
- case PTRACE_SINGLESTEP: { /* set the trap flag. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- set_single_step(child);
- child->exit_code = data;
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
- break;
- }
-
- case PTRACE_DETACH:
- ret = ptrace_detach(child, data);
- break;
-
-#ifdef CONFIG_ALTIVEC
- case PTRACE_GETVRREGS:
- /* Get the child altivec register state. */
- preempt_disable();
- if (child->thread.regs->msr & MSR_VEC)
- giveup_altivec(child);
- preempt_enable();
- ret = get_vrregs((unsigned long __user *)data, child);
- break;
-
- case PTRACE_SETVRREGS:
- /* Set the child altivec register state. */
- /* this is to clear the MSR_VEC bit to force a reload
- * of register state from memory */
- preempt_disable();
- if (child->thread.regs->msr & MSR_VEC)
- giveup_altivec(child);
- preempt_enable();
- ret = set_vrregs(child, (unsigned long __user *)data);
- break;
-#endif
-#ifdef CONFIG_SPE
- case PTRACE_GETEVRREGS:
- /* Get the child spe register state. */
- if (child->thread.regs->msr & MSR_SPE)
- giveup_spe(child);
- ret = get_evrregs((unsigned long __user *)data, child);
- break;
-
- case PTRACE_SETEVRREGS:
- /* Set the child spe register state. */
- /* this is to clear the MSR_SPE bit to force a reload
- * of register state from memory */
- if (child->thread.regs->msr & MSR_SPE)
- giveup_spe(child);
- ret = set_evrregs(child, (unsigned long __user *)data);
- break;
-#endif
-
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
- return ret;
-}
-
-static void do_syscall_trace(void)
-{
- /* the 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-}
-
-void do_syscall_trace_enter(struct pt_regs *regs)
-{
- if (test_thread_flag(TIF_SYSCALL_TRACE)
- && (current->ptrace & PT_PTRACED))
- do_syscall_trace();
-
- if (unlikely(current->audit_context))
- audit_syscall_entry(current, AUDIT_ARCH_PPC,
- regs->gpr[0],
- regs->gpr[3], regs->gpr[4],
- regs->gpr[5], regs->gpr[6]);
-}
-
-void do_syscall_trace_leave(struct pt_regs *regs)
-{
- secure_computing(regs->gpr[0]);
-
- if (unlikely(current->audit_context))
- audit_syscall_exit(current,
- (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
- regs->result);
-
- if ((test_thread_flag(TIF_SYSCALL_TRACE))
- && (current->ptrace & PT_PTRACED))
- do_syscall_trace();
-}
-
-EXPORT_SYMBOL(do_syscall_trace_enter);
-EXPORT_SYMBOL(do_syscall_trace_leave);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 545cfd0fab59..6bcb85d2b7fd 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -71,7 +71,8 @@ struct ide_machdep_calls ppc_ide_md;
unsigned long boot_mem_size;
unsigned long ISA_DMA_THRESHOLD;
-unsigned long DMA_MODE_READ, DMA_MODE_WRITE;
+unsigned int DMA_MODE_READ;
+unsigned int DMA_MODE_WRITE;
#ifdef CONFIG_PPC_MULTIPLATFORM
int _machine = 0;
@@ -82,8 +83,18 @@ extern void pmac_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);
extern void chrp_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);
+
+dev_t boot_dev;
#endif /* CONFIG_PPC_MULTIPLATFORM */
+int have_of;
+EXPORT_SYMBOL(have_of);
+
+#ifdef __DO_IRQ_CANON
+int ppc_do_canonicalize_irqs;
+EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
+#endif
+
#ifdef CONFIG_MAGIC_SYSRQ
unsigned long SYSRQ_KEY = 0x54;
#endif /* CONFIG_MAGIC_SYSRQ */
@@ -185,18 +196,18 @@ int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %d\n", i);
seq_printf(m, "cpu\t\t: ");
- if (cur_cpu_spec[i]->pvr_mask)
- seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name);
+ if (cur_cpu_spec->pvr_mask)
+ seq_printf(m, "%s", cur_cpu_spec->cpu_name);
else
seq_printf(m, "unknown (%08x)", pvr);
#ifdef CONFIG_ALTIVEC
- if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC)
+ if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
seq_printf(m, ", altivec supported");
#endif
seq_printf(m, "\n");
#ifdef CONFIG_TAU
- if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) {
+ if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
/* more straightforward, but potentially misleading */
seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
@@ -339,7 +350,7 @@ early_init(int r3, int r4, int r5)
* Assume here that all clock rates are the same in a
* smp system. -- Cort
*/
-int __openfirmware
+int
of_show_percpuinfo(struct seq_file *m, int i)
{
struct device_node *cpu_node;
@@ -404,11 +415,15 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
_machine = _MACH_prep;
}
+#ifdef CONFIG_PPC_PREP
/* not much more to do here, if prep */
if (_machine == _MACH_prep) {
prep_init(r3, r4, r5, r6, r7);
return;
}
+#endif
+
+ have_of = 1;
/* prom_init has already been called from __start */
if (boot_infos)
@@ -479,12 +494,16 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
#endif /* CONFIG_ADB */
switch (_machine) {
+#ifdef CONFIG_PPC_PMAC
case _MACH_Pmac:
pmac_init(r3, r4, r5, r6, r7);
break;
+#endif
+#ifdef CONFIG_PPC_CHRP
case _MACH_chrp:
chrp_init(r3, r4, r5, r6, r7);
break;
+#endif
}
}
@@ -721,7 +740,7 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef CONFIG_XMON
- xmon_map_scc();
+ xmon_init(1);
if (strstr(cmd_line, "xmon"))
xmon(NULL);
#endif /* CONFIG_XMON */
@@ -745,12 +764,12 @@ void __init setup_arch(char **cmdline_p)
* for a possibly more accurate value.
*/
if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
- dcache_bsize = cur_cpu_spec[0]->dcache_bsize;
- icache_bsize = cur_cpu_spec[0]->icache_bsize;
+ dcache_bsize = cur_cpu_spec->dcache_bsize;
+ icache_bsize = cur_cpu_spec->icache_bsize;
ucache_bsize = 0;
} else
ucache_bsize = dcache_bsize = icache_bsize
- = cur_cpu_spec[0]->dcache_bsize;
+ = cur_cpu_spec->dcache_bsize;
/* reboot on panic */
panic_timeout = 180;
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
deleted file mode 100644
index 2244bf91e593..000000000000
--- a/arch/ppc/kernel/signal.c
+++ /dev/null
@@ -1,771 +0,0 @@
-/*
- * arch/ppc/kernel/signal.c
- *
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/i386/kernel/signal.c"
- * Copyright (C) 1991, 1992 Linus Torvalds
- * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/elf.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/suspend.h>
-#include <asm/ucontext.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-
-#undef DEBUG_SIG
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-extern void sigreturn_exit(struct pt_regs *);
-
-#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
-
-int do_signal(sigset_t *oldset, struct pt_regs *regs);
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-int
-sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
- struct pt_regs *regs)
-{
- sigset_t saveset;
-
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->result = -EINTR;
- regs->gpr[3] = EINTR;
- regs->ccr |= 0x10000000;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(&saveset, regs))
- sigreturn_exit(regs);
- }
-}
-
-int
-sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
- int p6, int p7, struct pt_regs *regs)
-{
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->result = -EINTR;
- regs->gpr[3] = EINTR;
- regs->ccr |= 0x10000000;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(&saveset, regs))
- sigreturn_exit(regs);
- }
-}
-
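As an aside (not part of the patch): both suspend paths above pre-load an error return before sleeping -- gpr[3] gets the positive errno and the 0x10000000 constant is the CR0.SO bit, which the PowerPC syscall ABI uses to tell userspace that gpr[3] holds an error rather than a result. A minimal sketch of that convention seen from the userspace side; the mask value is assumed from the standard CR0 layout and the helper is hypothetical, not a real libc function.

#include <errno.h>

#define CR0_SO 0x10000000u              /* summary-overflow bit of CR field 0 */

/* roughly what a libc syscall stub does with the (cr, r3) pair on return:
 * CR0.SO set means r3 is a positive errno, clear means r3 is the result */
static long syscall_return(unsigned int ccr, unsigned long r3)
{
        if (ccr & CR0_SO) {
                errno = (int)r3;
                return -1;
        }
        return (long)r3;
}
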
-
-int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
- int r6, int r7, int r8, struct pt_regs *regs)
-{
- return do_sigaltstack(uss, uoss, regs->gpr[1]);
-}
-
-int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
- struct old_sigaction __user *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
- if (act) {
- old_sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
- }
-
- ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
-
- if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
- }
-
- return ret;
-}
-
-/*
- * When we have signals to deliver, we set up on the
- * user stack, going down from the original stack pointer:
- * a sigregs struct
- * a sigcontext struct
- * a gap of __SIGNAL_FRAMESIZE bytes
- *
- * Each of these things must be a multiple of 16 bytes in size.
- *
- */
-struct sigregs {
- struct mcontext mctx; /* all the register values */
- /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
- and 18 fp regs below sp before decrementing it. */
- int abigap[56];
-};
-
-/* We use the mc_pad field for the signal return trampoline. */
-#define tramp mc_pad
-
-/*
- * When we have rt signals to deliver, we set up on the
- * user stack, going down from the original stack pointer:
- * one rt_sigframe struct (siginfo + ucontext + ABI gap)
- * a gap of __SIGNAL_FRAMESIZE+16 bytes
- * (the +16 is to get the siginfo and ucontext in the same
- * positions as in older kernels).
- *
- * Each of these things must be a multiple of 16 bytes in size.
- *
- */
-struct rt_sigframe
-{
- struct siginfo info;
- struct ucontext uc;
- /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
- and 18 fp regs below sp before decrementing it. */
- int abigap[56];
-};
-
-/*
- * Save the current user registers on the user stack.
- * We only save the altivec/spe registers if the process has used
- * altivec/spe instructions at some point.
- */
-static int
-save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
-{
- /* save general and floating-point registers */
- CHECK_FULL_REGS(regs);
- preempt_disable();
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
- if (current->thread.used_vr && (regs->msr & MSR_VEC))
- giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- if (current->thread.used_spe && (regs->msr & MSR_SPE))
- giveup_spe(current);
-#endif /* CONFIG_SPE */
- preempt_enable();
-
- if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE)
- || __copy_to_user(&frame->mc_fregs, current->thread.fpr,
- ELF_NFPREG * sizeof(double)))
- return 1;
-
- current->thread.fpscr = 0; /* turn off all fp exceptions */
-
-#ifdef CONFIG_ALTIVEC
- /* save altivec registers */
- if (current->thread.used_vr) {
- if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
- ELF_NVRREG * sizeof(vector128)))
- return 1;
- /* set MSR_VEC in the saved MSR value to indicate that
- frame->mc_vregs contains valid data */
- if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
- return 1;
- }
- /* else assert((regs->msr & MSR_VEC) == 0) */
-
- /* We always copy to/from vrsave, it's 0 if we don't have or don't
- * use altivec. Since VSCR only contains 32 bits saved in the least
- * significant bits of a vector, we "cheat" and stuff VRSAVE in the
- * most significant bits of that same vector. --BenH
- */
- if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
- return 1;
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
- /* save spe registers */
- if (current->thread.used_spe) {
- if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
- /* set MSR_SPE in the saved MSR value to indicate that
- frame->mc_vregs contains valid data */
- if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
- return 1;
- }
- /* else assert((regs->msr & MSR_SPE) == 0) */
-
- /* We always copy to/from spefscr */
- if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
- return 1;
-#endif /* CONFIG_SPE */
-
- if (sigret) {
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
- || __put_user(0x44000002UL, &frame->tramp[1]))
- return 1;
- flush_icache_range((unsigned long) &frame->tramp[0],
- (unsigned long) &frame->tramp[2]);
- }
-
- return 0;
-}
-
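For reference (not part of the patch): the two words save_user_regs() writes into frame->tramp form a complete sigreturn trampoline -- a load-immediate of the syscall number into r0 followed by sc. A standalone sketch of how those constants decode, assuming the standard PowerPC primary-opcode layout; the syscall number used here is a stand-in, not a value taken from this patch.

#include <stdio.h>

int main(void)
{
        unsigned int sigret = 119;                      /* stand-in for __NR_sigreturn */
        unsigned int insn0 = 0x38000000u + sigret;      /* addi r0,0,sigret == li r0,sigret */
        unsigned int insn1 = 0x44000002u;               /* sc */

        printf("insn0: opcode=%u rD=%u rA=%u simm=%u\n",
               insn0 >> 26, (insn0 >> 21) & 31, (insn0 >> 16) & 31, insn0 & 0xffff);
        printf("insn1: opcode=%u (sc)\n", insn1 >> 26);
        return 0;
}
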
-/*
- * Restore the current user register values from the user stack,
- * (except for MSR).
- */
-static int
-restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
-{
- unsigned long save_r2 = 0;
-#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
- unsigned long msr;
-#endif
-
- /* backup/restore the TLS as we don't want it to be modified */
- if (!sig)
- save_r2 = regs->gpr[2];
- /* copy up to but not including MSR */
- if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
- return 1;
- /* copy from orig_r3 (the word after the MSR) up to the end */
- if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
- GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
- return 1;
- if (!sig)
- regs->gpr[2] = save_r2;
-
- /* force the process to reload the FP registers from
- current->thread when it next does FP instructions */
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
- if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
- sizeof(sr->mc_fregs)))
- return 1;
-
-#ifdef CONFIG_ALTIVEC
- /* force the process to reload the altivec registers from
- current->thread when it next does altivec instructions */
- regs->msr &= ~MSR_VEC;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
- /* restore altivec registers from the stack */
- if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
- sizeof(sr->mc_vregs)))
- return 1;
- } else if (current->thread.used_vr)
- memset(&current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
-
- /* Always get VRSAVE back */
- if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
- return 1;
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
- /* force the process to reload the spe registers from
- current->thread when it next does spe instructions */
- regs->msr &= ~MSR_SPE;
- if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
- /* restore spe registers from the stack */
- if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
- } else if (current->thread.used_spe)
- memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
-
- /* Always get SPEFSCR back */
- if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
- return 1;
-#endif /* CONFIG_SPE */
-
-#ifndef CONFIG_SMP
- preempt_disable();
- if (last_task_used_math == current)
- last_task_used_math = NULL;
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
- preempt_enable();
-#endif
- return 0;
-}
-
-/*
- * Restore the user process's signal mask
- */
-static void
-restore_sigmask(sigset_t *set)
-{
- sigdelsetmask(set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = *set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-}
-
-/*
- * Set up a signal frame for a "real-time" signal handler
- * (one which gets siginfo).
- */
-static void
-handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
- unsigned long newsp)
-{
- struct rt_sigframe __user *rt_sf;
- struct mcontext __user *frame;
- unsigned long origsp = newsp;
-
- /* Set up Signal Frame */
- /* Put a Real Time Context onto stack */
- newsp -= sizeof(*rt_sf);
- rt_sf = (struct rt_sigframe __user *) newsp;
-
- /* create a stack frame for the caller of the handler */
- newsp -= __SIGNAL_FRAMESIZE + 16;
-
- if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
- goto badframe;
-
- /* Put the siginfo & fill in most of the ucontext */
- if (copy_siginfo_to_user(&rt_sf->info, info)
- || __put_user(0, &rt_sf->uc.uc_flags)
- || __put_user(0, &rt_sf->uc.uc_link)
- || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
- || __put_user(sas_ss_flags(regs->gpr[1]),
- &rt_sf->uc.uc_stack.ss_flags)
- || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
- || __put_user(&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
- || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset)))
- goto badframe;
-
- /* Save user registers on the stack */
- frame = &rt_sf->uc.uc_mcontext;
- if (save_user_regs(regs, frame, __NR_rt_sigreturn))
- goto badframe;
-
- if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
- goto badframe;
- regs->gpr[1] = newsp;
- regs->gpr[3] = sig;
- regs->gpr[4] = (unsigned long) &rt_sf->info;
- regs->gpr[5] = (unsigned long) &rt_sf->uc;
- regs->gpr[6] = (unsigned long) rt_sf;
- regs->nip = (unsigned long) ka->sa.sa_handler;
- regs->link = (unsigned long) frame->tramp;
- regs->trap = 0;
-
- return;
-
-badframe:
-#ifdef DEBUG_SIG
- printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
- regs, frame, newsp);
-#endif
- force_sigsegv(sig, current);
-}
-
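A quick aside (not part of the patch): handle_rt_signal() builds everything below the interrupted user stack pointer -- the rt_sigframe first, then a stack frame for the caller of the handler -- after do_signal() has already 16-byte-aligned the starting point. A small sketch of that address arithmetic; the sizes are assumptions for concreteness (__SIGNAL_FRAMESIZE is taken to be the usual 32-bit value of 64, and a round number stands in for sizeof(struct rt_sigframe)).

#include <stdio.h>

#define SIGNAL_FRAMESIZE  64    /* assumed 32-bit PPC value */
#define RT_SIGFRAME_SIZE  960   /* stand-in for sizeof(struct rt_sigframe) */

int main(void)
{
        unsigned long sp = 0xbffff3a8UL;        /* pretend interrupted gpr[1] */
        unsigned long newsp, rt_sf;

        newsp = sp & ~0xfUL;                    /* do_signal() aligns first */
        newsp -= RT_SIGFRAME_SIZE;              /* rt_sigframe: siginfo + ucontext + gap */
        rt_sf = newsp;
        newsp -= SIGNAL_FRAMESIZE + 16;         /* frame for the caller of the handler */

        printf("old sp %#lx\nrt_sf  %#lx\nnew sp %#lx (back chain -> old sp)\n",
               sp, rt_sf, newsp);
        return 0;
}
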
-static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
-{
- sigset_t set;
- struct mcontext __user *mcp;
-
- if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
- || __get_user(mcp, &ucp->uc_regs))
- return -EFAULT;
- restore_sigmask(&set);
- if (restore_user_regs(regs, mcp, sig))
- return -EFAULT;
-
- return 0;
-}
-
-int sys_swapcontext(struct ucontext __user *old_ctx,
- struct ucontext __user *new_ctx,
- int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
-{
- unsigned char tmp;
-
- /* Context size is for future use. Right now, we only make sure
- * we are passed something we understand
- */
- if (ctx_size < sizeof(struct ucontext))
- return -EINVAL;
-
- if (old_ctx != NULL) {
- if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
- || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
- || __copy_to_user(&old_ctx->uc_sigmask,
- &current->blocked, sizeof(sigset_t))
- || __put_user(&old_ctx->uc_mcontext, &old_ctx->uc_regs))
- return -EFAULT;
- }
- if (new_ctx == NULL)
- return 0;
- if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
- || __get_user(tmp, (u8 __user *) new_ctx)
- || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
- return -EFAULT;
-
- /*
- * If we get a fault copying the context into the kernel's
- * image of the user's registers, we can't just return -EFAULT
- * because the user's registers will be corrupted. For instance
- * the NIP value may have been updated but not some of the
- * other registers. Given that we have done the access_ok
- * and successfully read the first and last bytes of the region
- * above, this should only happen in an out-of-memory situation
- * or if another thread unmaps the region containing the context.
- * We kill the task with a SIGSEGV in this situation.
- */
- if (do_setcontext(new_ctx, regs, 0))
- do_exit(SIGSEGV);
- sigreturn_exit(regs);
- /* doesn't actually return back to here */
- return 0;
-}
-
-int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
- struct pt_regs *regs)
-{
- struct rt_sigframe __user *rt_sf;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- rt_sf = (struct rt_sigframe __user *)
- (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
- if (!access_ok(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
- goto bad;
- if (do_setcontext(&rt_sf->uc, regs, 1))
- goto bad;
-
- /*
- * It's not clear whether or why it is desirable to save the
- * sigaltstack setting on signal delivery and restore it on
- * signal return. But other architectures do this and we have
- * always done it up until now so it is probably better not to
- * change it. -- paulus
- */
- do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
-
- sigreturn_exit(regs); /* doesn't return here */
- return 0;
-
- bad:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-int sys_debug_setcontext(struct ucontext __user *ctx,
- int ndbg, struct sig_dbg_op __user *dbg,
- int r6, int r7, int r8,
- struct pt_regs *regs)
-{
- struct sig_dbg_op op;
- int i;
- unsigned long new_msr = regs->msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- unsigned long new_dbcr0 = current->thread.dbcr0;
-#endif
-
- for (i=0; i<ndbg; i++) {
- if (__copy_from_user(&op, dbg, sizeof(op)))
- return -EFAULT;
- switch (op.dbg_type) {
- case SIG_DBG_SINGLE_STEPPING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- if (op.dbg_value) {
- new_msr |= MSR_DE;
- new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
- } else {
- new_msr &= ~MSR_DE;
- new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
- }
-#else
- if (op.dbg_value)
- new_msr |= MSR_SE;
- else
- new_msr &= ~MSR_SE;
-#endif
- break;
- case SIG_DBG_BRANCH_TRACING:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- return -EINVAL;
-#else
- if (op.dbg_value)
- new_msr |= MSR_BE;
- else
- new_msr &= ~MSR_BE;
-#endif
- break;
-
- default:
- return -EINVAL;
- }
- }
-
- /* We wait until here to actually install the values in the
- registers so if we fail in the above loop, it will not
- affect the contents of these registers. After this point,
- failure is a problem, anyway, and it's very unlikely unless
- the user is really doing something wrong. */
- regs->msr = new_msr;
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- current->thread.dbcr0 = new_dbcr0;
-#endif
-
- /*
- * If we get a fault copying the context into the kernel's
- * image of the user's registers, we can't just return -EFAULT
- * because the user's registers will be corrupted. For instance
- * the NIP value may have been updated but not some of the
- * other registers. Given that we have done the access_ok
- * and successfully read the first and last bytes of the region
- * above, this should only happen in an out-of-memory situation
- * or if another thread unmaps the region containing the context.
- * We kill the task with a SIGSEGV in this situation.
- */
- if (do_setcontext(ctx, regs, 1)) {
- force_sig(SIGSEGV, current);
- goto out;
- }
-
- /*
- * It's not clear whether or why it is desirable to save the
- * sigaltstack setting on signal delivery and restore it on
- * signal return. But other architectures do this and we have
- * always done it up until now so it is probably better not to
- * change it. -- paulus
- */
- do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
-
- sigreturn_exit(regs);
- /* doesn't actually return back to here */
-
- out:
- return 0;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(unsigned long sig, struct k_sigaction *ka,
- siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
- unsigned long newsp)
-{
- struct sigcontext __user *sc;
- struct sigregs __user *frame;
- unsigned long origsp = newsp;
-
- /* Set up Signal Frame */
- newsp -= sizeof(struct sigregs);
- frame = (struct sigregs __user *) newsp;
-
- /* Put a sigcontext on the stack */
- newsp -= sizeof(*sc);
- sc = (struct sigcontext __user *) newsp;
-
- /* create a stack frame for the caller of the handler */
- newsp -= __SIGNAL_FRAMESIZE;
-
- if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
- goto badframe;
-
-#if _NSIG != 64
-#error "Please adjust handle_signal()"
-#endif
- if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
- || __put_user(oldset->sig[0], &sc->oldmask)
- || __put_user(oldset->sig[1], &sc->_unused[3])
- || __put_user((struct pt_regs __user *)frame, &sc->regs)
- || __put_user(sig, &sc->signal))
- goto badframe;
-
- if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
- goto badframe;
-
- if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
- goto badframe;
- regs->gpr[1] = newsp;
- regs->gpr[3] = sig;
- regs->gpr[4] = (unsigned long) sc;
- regs->nip = (unsigned long) ka->sa.sa_handler;
- regs->link = (unsigned long) frame->mctx.tramp;
- regs->trap = 0;
-
- return;
-
-badframe:
-#ifdef DEBUG_SIG
- printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
- regs, frame, newsp);
-#endif
- force_sigsegv(sig, current);
-}
-
-/*
- * Do a signal return; undo the signal stack.
- */
-int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
- struct pt_regs *regs)
-{
- struct sigcontext __user *sc;
- struct sigcontext sigctx;
- struct mcontext __user *sr;
- sigset_t set;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
- if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
- goto badframe;
-
- set.sig[0] = sigctx.oldmask;
- set.sig[1] = sigctx._unused[3];
- restore_sigmask(&set);
-
- sr = (struct mcontext __user *) sigctx.regs;
- if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
- || restore_user_regs(regs, sr, 1))
- goto badframe;
-
- sigreturn_exit(regs); /* doesn't return */
- return 0;
-
-badframe:
- force_sig(SIGSEGV, current);
- return 0;
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-int do_signal(sigset_t *oldset, struct pt_regs *regs)
-{
- siginfo_t info;
- struct k_sigaction ka;
- unsigned long frame, newsp;
- int signr, ret;
-
- if (try_to_freeze()) {
- signr = 0;
- if (!signal_pending(current))
- goto no_signal;
- }
-
- if (!oldset)
- oldset = &current->blocked;
-
- newsp = frame = 0;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- no_signal:
- if (TRAP(regs) == 0x0C00 /* System Call! */
- && regs->ccr & 0x10000000 /* error signalled */
- && ((ret = regs->gpr[3]) == ERESTARTSYS
- || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
- || ret == ERESTART_RESTARTBLOCK)) {
-
- if (signr > 0
- && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
- || (ret == ERESTARTSYS
- && !(ka.sa.sa_flags & SA_RESTART)))) {
- /* make the system call return an EINTR error */
- regs->result = -EINTR;
- regs->gpr[3] = EINTR;
- /* note that the cr0.SO bit is already set */
- } else {
- regs->nip -= 4; /* Back up & retry system call */
- regs->result = 0;
- regs->trap = 0;
- if (ret == ERESTART_RESTARTBLOCK)
- regs->gpr[0] = __NR_restart_syscall;
- else
- regs->gpr[3] = regs->orig_gpr3;
- }
- }
-
- if (signr == 0)
- return 0; /* no signals delivered */
-
- if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
- && !on_sig_stack(regs->gpr[1]))
- newsp = current->sas_ss_sp + current->sas_ss_size;
- else
- newsp = regs->gpr[1];
- newsp &= ~0xfUL;
-
- /* Whee! Actually deliver the signal. */
- if (ka.sa.sa_flags & SA_SIGINFO)
- handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
- else
- handle_signal(signr, &ka, &info, oldset, regs, newsp);
-
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
- if (!(ka.sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- return 1;
-}
-
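For reference (not part of the patch): the tail of do_signal() above implements the usual syscall-restart convention -- if the interrupted trap was a system call that failed with one of the ERESTART codes, it is either converted to EINTR (a handler is about to run and does not want a restart) or re-issued by backing nip up four bytes to the sc instruction, with ERESTART_RESTARTBLOCK redirected through restart_syscall. A reduced sketch of just that decision; the enum constants are illustrative stand-ins, not the kernel's error values.

#include <stdbool.h>

enum restart_code {
        ERESTARTSYS_ = 1,               /* restart only if the handler has SA_RESTART */
        ERESTARTNOINTR_,                /* always restart */
        ERESTARTNOHAND_,                /* restart only when no handler runs */
        ERESTART_RESTARTBLOCK_,         /* like NOHAND, but via restart_syscall */
};

/* true: back nip up by 4 and re-execute; false: make the call fail with EINTR */
static bool should_restart(enum restart_code ret, bool handler_pending, bool sa_restart)
{
        if (!handler_pending)
                return true;
        switch (ret) {
        case ERESTARTNOINTR_:
                return true;
        case ERESTARTSYS_:
                return sa_restart;
        default:                        /* ERESTARTNOHAND_, ERESTART_RESTARTBLOCK_ */
                return false;
        }
}
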
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 726fe7ce1747..bc5bf1124836 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -34,11 +34,11 @@
#include <asm/thread_info.h>
#include <asm/tlbflush.h>
#include <asm/xmon.h>
+#include <asm/machdep.h>
volatile int smp_commenced;
int smp_tb_synchronized;
struct cpuinfo_PPC cpu_data[NR_CPUS];
-struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
atomic_t ipi_recv;
atomic_t ipi_sent;
cpumask_t cpu_online_map;
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
/* SMP operations for this machine */
-static struct smp_ops_t *smp_ops;
+struct smp_ops_t *smp_ops;
/* all cpu mappings are 1-1 -- Cort */
volatile unsigned long cpu_callin_map[NR_CPUS];
@@ -74,11 +74,11 @@ extern void __save_cpu_setup(void);
#define PPC_MSG_XMON_BREAK 3
static inline void
-smp_message_pass(int target, int msg, unsigned long data, int wait)
+smp_message_pass(int target, int msg)
{
- if (smp_ops){
+ if (smp_ops) {
atomic_inc(&ipi_sent);
- smp_ops->message_pass(target,msg,data,wait);
+ smp_ops->message_pass(target, msg);
}
}
@@ -119,7 +119,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
void smp_send_tlb_invalidate(int cpu)
{
if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
- smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0);
+ smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB);
}
void smp_send_reschedule(int cpu)
@@ -135,13 +135,13 @@ void smp_send_reschedule(int cpu)
*/
/* This is only used if `cpu' is running an idle task,
so it will reschedule itself anyway... */
- smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
+ smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
#ifdef CONFIG_XMON
void smp_send_xmon_break(int cpu)
{
- smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0);
+ smp_message_pass(cpu, PPC_MSG_XMON_BREAK);
}
#endif /* CONFIG_XMON */
@@ -224,7 +224,7 @@ static int __smp_call_function(void (*func) (void *info), void *info,
spin_lock(&call_lock);
call_data = &data;
/* Send a message to all other CPUs and wait for them to respond */
- smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0);
+ smp_message_pass(target, PPC_MSG_CALL_FUNCTION);
/* Wait for response */
timeout = 1000000;
@@ -294,7 +294,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(smp_processor_id());
cpu_callin_map[smp_processor_id()] = 1;
- smp_ops = ppc_md.smp_ops;
if (smp_ops == NULL) {
printk("SMP not supported on this machine.\n");
return;
@@ -308,9 +307,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* Backup CPU 0 state */
__save_cpu_setup();
- if (smp_ops->space_timers)
- smp_ops->space_timers(num_cpus);
-
for_each_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c
deleted file mode 100644
index 127f040de9de..000000000000
--- a/arch/ppc/kernel/syscalls.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * arch/ppc/kernel/sys_ppc.c
- *
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/i386/kernel/sys_i386.c"
- * Adapted from the i386 version by Gary Thomas
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- * and Paul Mackerras (paulus@cs.anu.edu.au).
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the Linux/PPC
- * platform.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/sys.h>
-#include <linux/ipc.h>
-#include <linux/utsname.h>
-#include <linux/file.h>
-#include <linux/unistd.h>
-
-#include <asm/uaccess.h>
-#include <asm/ipc.h>
-#include <asm/semaphore.h>
-
-
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-int
-sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
-{
- int version, ret;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- ret = -ENOSYS;
- switch (call) {
- case SEMOP:
- ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
- second, NULL);
- break;
- case SEMTIMEDOP:
- ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
- second, (const struct timespec __user *) fifth);
- break;
- case SEMGET:
- ret = sys_semget (first, second, third);
- break;
- case SEMCTL: {
- union semun fourth;
-
- if (!ptr)
- break;
- if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
- || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
- break;
- ret = sys_semctl (first, second, third, fourth);
- break;
- }
- case MSGSND:
- ret = sys_msgsnd (first, (struct msgbuf __user *) ptr, second, third);
- break;
- case MSGRCV:
- switch (version) {
- case 0: {
- struct ipc_kludge tmp;
-
- if (!ptr)
- break;
- if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
- || (ret = copy_from_user(&tmp,
- (struct ipc_kludge __user *) ptr,
- sizeof (tmp)) ? -EFAULT : 0))
- break;
- ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
- third);
- break;
- }
- default:
- ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
- second, fifth, third);
- break;
- }
- break;
- case MSGGET:
- ret = sys_msgget ((key_t) first, second);
- break;
- case MSGCTL:
- ret = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
- break;
- case SHMAT: {
- ulong raddr;
-
- if ((ret = access_ok(VERIFY_WRITE, (ulong __user *) third,
- sizeof(ulong)) ? 0 : -EFAULT))
- break;
- ret = do_shmat (first, (char __user *) ptr, second, &raddr);
- if (ret)
- break;
- ret = put_user (raddr, (ulong __user *) third);
- break;
- }
- case SHMDT:
- ret = sys_shmdt ((char __user *)ptr);
- break;
- case SHMGET:
- ret = sys_shmget (first, second, third);
- break;
- case SHMCTL:
- ret = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
- break;
- }
-
- return ret;
-}
-
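As an aside (not part of the patch): the single ipc(2) entry point packs a backward-compatibility version number into the upper half of the call word, which is why sys_ipc() begins by splitting it. A tiny sketch of that split; the sample value is arbitrary.

#include <stdio.h>

int main(void)
{
        unsigned int call = (1u << 16) | 12;    /* arbitrary: version 1, operation 12 */

        printf("version=%u op=%u\n", call >> 16, call & 0xffff);
        return 0;
}
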
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way unix traditionally does this, though.
- */
-int sys_pipe(int __user *fildes)
-{
- int fd[2];
- int error;
-
- error = do_pipe(fd);
- if (!error) {
- if (copy_to_user(fildes, fd, 2*sizeof(int)))
- error = -EFAULT;
- }
- return error;
-}
-
-static inline unsigned long
-do_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- struct file * file = NULL;
- int ret = -EBADF;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- if (!(file = fget(fd)))
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
-out:
- return ret;
-}
-
-unsigned long sys_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
-unsigned long sys_mmap(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t offset)
-{
- int err = -EINVAL;
-
- if (offset & ~PAGE_MASK)
- goto out;
-
- err = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
-out:
- return err;
-}
-
-/*
- * Due to some executables calling the wrong select we sometimes
- * get wrong args. This determines how the args are being passed
- * (either normally, or as a single pointer to a block holding all of
- * them) and then calls sys_select() with the appropriate args. -- Cort
- */
-int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
-{
- if ( (unsigned long)n >= 4096 )
- {
- unsigned long __user *buffer = (unsigned long __user *)n;
- if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
- || __get_user(n, buffer)
- || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
- || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
- || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
- || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
- return -EFAULT;
- }
- return sys_select(n, inp, outp, exp, tvp);
-}
-
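For reference (not part of the patch): the n >= 4096 test works because a real descriptor count is small while a pointer into user memory lies above the first, unmapped page, so a legacy caller that passed one pointer to all five arguments can still be recognised. A sketch of the word block such a caller hands over; the struct name is invented here, and on 32-bit each field is one word, matching the buffer+0..buffer+4 reads above.

#include <sys/select.h>

/* the five words read back-to-back by the legacy path in ppc_select() */
struct old_select_args {
        unsigned long n;
        fd_set *inp;
        fd_set *outp;
        fd_set *exp;
        struct timeval *tvp;
};
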
-int sys_uname(struct old_utsname __user * name)
-{
- int err = -EFAULT;
-
- down_read(&uts_sem);
- if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
- err = 0;
- up_read(&uts_sem);
- return err;
-}
-
-int sys_olduname(struct oldold_utsname __user * name)
-{
- int error;
-
- if (!name)
- return -EFAULT;
- if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
- return -EFAULT;
-
- down_read(&uts_sem);
- error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
- error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
- error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
- error -= __put_user(0,name->release+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
- error -= __put_user(0,name->version+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
- error = __put_user(0,name->machine+__OLD_UTS_LEN);
- up_read(&uts_sem);
-
- error = error ? -EFAULT : 0;
- return error;
-}
-
-/*
- * We put the arguments in a different order so we only use 6
- * registers for arguments, rather than 7 as sys_fadvise64_64 needs
- * (because `offset' goes in r5/r6).
- */
-long ppc_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
-{
- return sys_fadvise64_64(fd, offset, len, advice);
-}
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index 22d7fd1e0aea..53ea723af60a 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -66,11 +66,6 @@
#include <asm/time.h>
-/* XXX false sharing with below? */
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
unsigned long disarm_decr[NR_CPUS];
extern struct timezone sys_tz;
@@ -121,6 +116,15 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
+void wakeup_decrementer(void)
+{
+ set_dec(tb_ticks_per_jiffy);
+ /* No currently-supported powerbook has a 601,
+ * so use get_tbl, not native
+ */
+ last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
+}
+
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 961ede87be72..16adde6b429d 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -41,9 +41,14 @@
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
-#include <asm/perfmon.h>
+#include <asm/pmc.h>
#ifdef CONFIG_XMON
+extern int xmon_bpt(struct pt_regs *regs);
+extern int xmon_sstep(struct pt_regs *regs);
+extern int xmon_iabr_match(struct pt_regs *regs);
+extern int xmon_dabr_match(struct pt_regs *regs);
+
void (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
@@ -74,7 +79,7 @@ void (*debugger_fault_handler)(struct pt_regs *regs);
DEFINE_SPINLOCK(die_lock);
-void die(const char * str, struct pt_regs * fp, long err)
+int die(const char * str, struct pt_regs * fp, long err)
{
static int die_counter;
int nl = 0;
@@ -232,7 +237,7 @@ platform_machine_check(struct pt_regs *regs)
{
}
-void MachineCheckException(struct pt_regs *regs)
+void machine_check_exception(struct pt_regs *regs)
{
unsigned long reason = get_mc_reason(regs);
@@ -393,14 +398,14 @@ void SMIException(struct pt_regs *regs)
#endif
}
-void UnknownException(struct pt_regs *regs)
+void unknown_exception(struct pt_regs *regs)
{
printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
regs->nip, regs->msr, regs->trap, print_tainted());
_exception(SIGTRAP, regs, 0, 0);
}
-void InstructionBreakpoint(struct pt_regs *regs)
+void instruction_breakpoint_exception(struct pt_regs *regs)
{
if (debugger_iabr_match(regs))
return;
@@ -575,7 +580,7 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
#define module_find_bug(x) NULL
#endif
-static struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
{
struct bug_entry *bug;
@@ -601,28 +606,28 @@ int check_bug_trap(struct pt_regs *regs)
if (bug->line & BUG_WARNING_TRAP) {
/* this is a WARN_ON rather than BUG/BUG_ON */
#ifdef CONFIG_XMON
- xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
+ xmon_printf(KERN_ERR "Badness in %s at %s:%ld\n",
bug->function, bug->file,
bug->line & ~BUG_WARNING_TRAP);
#endif /* CONFIG_XMON */
- printk(KERN_ERR "Badness in %s at %s:%d\n",
+ printk(KERN_ERR "Badness in %s at %s:%ld\n",
bug->function, bug->file,
bug->line & ~BUG_WARNING_TRAP);
dump_stack();
return 1;
}
#ifdef CONFIG_XMON
- xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+ xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
bug->function, bug->file, bug->line);
xmon(regs);
#endif /* CONFIG_XMON */
- printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
+ printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
bug->function, bug->file, bug->line);
return 0;
}
-void ProgramCheckException(struct pt_regs *regs)
+void program_check_exception(struct pt_regs *regs)
{
unsigned int reason = get_reason(regs);
extern int do_mathemu(struct pt_regs *regs);
@@ -654,7 +659,7 @@ void ProgramCheckException(struct pt_regs *regs)
giveup_fpu(current);
preempt_enable();
- fpscr = current->thread.fpscr;
+ fpscr = current->thread.fpscr.val;
fpscr &= fpscr << 22; /* mask summary bits with enables */
if (fpscr & FPSCR_VX)
code = FPE_FLTINV;
@@ -701,7 +706,7 @@ void ProgramCheckException(struct pt_regs *regs)
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
-void SingleStepException(struct pt_regs *regs)
+void single_step_exception(struct pt_regs *regs)
{
regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
if (debugger_sstep(regs))
@@ -709,7 +714,7 @@ void SingleStepException(struct pt_regs *regs)
_exception(SIGTRAP, regs, TRAP_TRACE, 0);
}
-void AlignmentException(struct pt_regs *regs)
+void alignment_exception(struct pt_regs *regs)
{
int fixed;
@@ -814,7 +819,18 @@ void TAUException(struct pt_regs *regs)
}
#endif /* CONFIG_INT_TAU */
-void AltivecUnavailException(struct pt_regs *regs)
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+void kernel_fp_unavailable_exception(struct pt_regs *regs)
+{
+ regs->msr |= MSR_FP;
+ printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n",
+ current, regs->nip);
+}
+
+void altivec_unavailable_exception(struct pt_regs *regs)
{
static int kernel_altivec_count;
@@ -835,7 +851,7 @@ void AltivecUnavailException(struct pt_regs *regs)
}
#ifdef CONFIG_ALTIVEC
-void AltivecAssistException(struct pt_regs *regs)
+void altivec_assist_exception(struct pt_regs *regs)
{
int err;
@@ -872,7 +888,7 @@ void AltivecAssistException(struct pt_regs *regs)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_E500
-void PerformanceMonitorException(struct pt_regs *regs)
+void performance_monitor_exception(struct pt_regs *regs)
{
perf_irq(regs);
}
diff --git a/arch/ppc/kernel/vecemu.c b/arch/ppc/kernel/vecemu.c
deleted file mode 100644
index 604d0947cb20..000000000000
--- a/arch/ppc/kernel/vecemu.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Routines to emulate some Altivec/VMX instructions, specifically
- * those that can trap when given denormalized operands in Java mode.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-
-/* Functions in vector.S */
-extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
-extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
-extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
-extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
-extern void vrefp(vector128 *dst, vector128 *src);
-extern void vrsqrtefp(vector128 *dst, vector128 *src);
-extern void vexptep(vector128 *dst, vector128 *src);
-
-static unsigned int exp2s[8] = {
- 0x800000,
- 0x8b95c2,
- 0x9837f0,
- 0xa5fed7,
- 0xb504f3,
- 0xc5672a,
- 0xd744fd,
- 0xeac0c7
-};
-
-/*
- * Computes an estimate of 2^x. The `s' argument is the 32-bit
- * single-precision floating-point representation of x.
- */
-static unsigned int eexp2(unsigned int s)
-{
- int exp, pwr;
- unsigned int mant, frac;
-
- /* extract exponent field from input */
- exp = ((s >> 23) & 0xff) - 127;
- if (exp > 7) {
- /* check for NaN input */
- if (exp == 128 && (s & 0x7fffff) != 0)
- return s | 0x400000; /* return QNaN */
- /* 2^-big = 0, 2^+big = +Inf */
- return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
- }
- if (exp < -23)
- return 0x3f800000; /* 1.0 */
-
- /* convert to fixed point integer in 9.23 representation */
- pwr = (s & 0x7fffff) | 0x800000;
- if (exp > 0)
- pwr <<= exp;
- else
- pwr >>= -exp;
- if (s & 0x80000000)
- pwr = -pwr;
-
- /* extract integer part, which becomes exponent part of result */
- exp = (pwr >> 23) + 126;
- if (exp >= 254)
- return 0x7f800000;
- if (exp < -23)
- return 0;
-
- /* table lookup on top 3 bits of fraction to get mantissa */
- mant = exp2s[(pwr >> 20) & 7];
-
- /* linear interpolation using remaining 20 bits of fraction */
- asm("mulhwu %0,%1,%2" : "=r" (frac)
- : "r" (pwr << 12), "r" (0x172b83ff));
- asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
- mant += frac;
-
- if (exp >= 0)
- return mant + (exp << 23);
-
- /* denormalized result */
- exp = -exp;
- mant += 1 << (exp - 1);
- return mant >> exp;
-}
-
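For reference (not part of the patch): eexp2() converts its argument to 9.23 fixed point, peels off the integer part as the result exponent, and interpolates the mantissa from the eight-entry table. A small userspace harness one could use to compare it with the C library, assuming eexp2() above is compiled in and floats are 32-bit IEEE 754.

#include <math.h>
#include <stdio.h>
#include <string.h>

extern unsigned int eexp2(unsigned int s);      /* the routine above */

static unsigned int f2u(float f) { unsigned int u; memcpy(&u, &f, sizeof u); return u; }
static float u2f(unsigned int u) { float f; memcpy(&f, &u, sizeof f); return f; }

int main(void)
{
        float x;

        for (x = -8.0f; x <= 8.0f; x += 0.5f)
                printf("2^%-5g est=%-12g libm=%g\n", x, u2f(eexp2(f2u(x))), exp2f(x));
        return 0;
}
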
-/*
- * Computes an estimate of log_2(x). The `s' argument is the 32-bit
- * single-precision floating-point representation of x.
- */
-static unsigned int elog2(unsigned int s)
-{
- int exp, mant, lz, frac;
-
- exp = s & 0x7f800000;
- mant = s & 0x7fffff;
- if (exp == 0x7f800000) { /* Inf or NaN */
- if (mant != 0)
- s |= 0x400000; /* turn NaN into QNaN */
- return s;
- }
- if ((exp | mant) == 0) /* +0 or -0 */
- return 0xff800000; /* return -Inf */
-
- if (exp == 0) {
- /* denormalized */
- asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
- mant <<= lz - 8;
- exp = (-118 - lz) << 23;
- } else {
- mant |= 0x800000;
- exp -= 127 << 23;
- }
-
- if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */
- exp |= 0x400000; /* 0.5 * 2^23 */
- asm("mulhwu %0,%1,%2" : "=r" (mant)
- : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */
- }
- if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */
- exp |= 0x200000; /* 0.25 * 2^23 */
- asm("mulhwu %0,%1,%2" : "=r" (mant)
- : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */
- }
- if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */
- exp |= 0x100000; /* 0.125 * 2^23 */
- asm("mulhwu %0,%1,%2" : "=r" (mant)
- : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */
- }
- if (mant > 0x800000) { /* 1.0 * 2^23 */
- /* calculate (mant - 1) * 1.381097463 */
- /* 1.381097463 == 0.125 / (2^0.125 - 1) */
- asm("mulhwu %0,%1,%2" : "=r" (frac)
- : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
- exp += frac;
- }
- s = exp & 0x80000000;
- if (exp != 0) {
- if (s)
- exp = -exp;
- asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
- lz = 8 - lz;
- if (lz > 0)
- exp >>= lz;
- else if (lz < 0)
- exp <<= -lz;
- s += ((lz + 126) << 23) + exp;
- }
- return s;
-}
-
-#define VSCR_SAT 1
-
-static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
-{
- int exp, mant;
-
- exp = (x >> 23) & 0xff;
- mant = x & 0x7fffff;
- if (exp == 255 && mant != 0)
- return 0; /* NaN -> 0 */
- exp = exp - 127 + scale;
- if (exp < 0)
- return 0; /* round towards zero */
- if (exp >= 31) {
- /* saturate, unless the result would be -2^31 */
- if (x + (scale << 23) != 0xcf000000)
- *vscrp |= VSCR_SAT;
- return (x & 0x80000000)? 0x80000000: 0x7fffffff;
- }
- mant |= 0x800000;
- mant = (mant << 7) >> (30 - exp);
- return (x & 0x80000000)? -mant: mant;
-}
-
-static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
-{
- int exp;
- unsigned int mant;
-
- exp = (x >> 23) & 0xff;
- mant = x & 0x7fffff;
- if (exp == 255 && mant != 0)
- return 0; /* NaN -> 0 */
- exp = exp - 127 + scale;
- if (exp < 0)
- return 0; /* round towards zero */
- if (x & 0x80000000) {
- /* negative => saturate to 0 */
- *vscrp |= VSCR_SAT;
- return 0;
- }
- if (exp >= 32) {
- /* saturate */
- *vscrp |= VSCR_SAT;
- return 0xffffffff;
- }
- mant |= 0x800000;
- mant = (mant << 8) >> (31 - exp);
- return mant;
-}
-
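As an aside (not part of the patch): ctuxs() is the core of vctuxs -- scale by 2^uimm, truncate toward zero, and saturate to the unsigned 32-bit range (ctsxs is the signed analogue). A readable double-precision reference for the value it produces, ignoring the VSCR.SAT side effect.

#include <math.h>
#include <stdio.h>

static unsigned int ctuxs_ref(float x, int scale)
{
        double v = trunc(ldexp((double)x, scale));      /* x * 2^scale, toward zero */

        if (isnan(v) || v <= 0.0)
                return 0;                               /* NaN and negatives map to 0 */
        if (v >= 4294967296.0)                          /* >= 2^32 saturates */
                return 0xffffffffu;
        return (unsigned int)v;
}

int main(void)
{
        printf("%u %u %u\n", ctuxs_ref(1.5f, 0), ctuxs_ref(1.5f, 4), ctuxs_ref(-3.0f, 0));
        return 0;       /* prints: 1 24 0 */
}
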
-/* Round to floating integer, towards 0 */
-static unsigned int rfiz(unsigned int x)
-{
- int exp;
-
- exp = ((x >> 23) & 0xff) - 127;
- if (exp == 128 && (x & 0x7fffff) != 0)
- return x | 0x400000; /* NaN -> make it a QNaN */
- if (exp >= 23)
- return x; /* it's an integer already (or Inf) */
- if (exp < 0)
- return x & 0x80000000; /* |x| < 1.0 rounds to 0 */
- return x & ~(0x7fffff >> exp);
-}
-
-/* Round to floating integer, towards +/- Inf */
-static unsigned int rfii(unsigned int x)
-{
- int exp, mask;
-
- exp = ((x >> 23) & 0xff) - 127;
- if (exp == 128 && (x & 0x7fffff) != 0)
- return x | 0x400000; /* NaN -> make it a QNaN */
- if (exp >= 23)
- return x; /* it's an integer already (or Inf) */
- if ((x & 0x7fffffff) == 0)
- return x; /* +/-0 -> +/-0 */
- if (exp < 0)
- /* 0 < |x| < 1.0 rounds to +/- 1.0 */
- return (x & 0x80000000) | 0x3f800000;
- mask = 0x7fffff >> exp;
- /* mantissa overflows into exponent - that's OK,
- it can't overflow into the sign bit */
- return (x + mask) & ~mask;
-}
-
-/* Round to floating integer, to nearest */
-static unsigned int rfin(unsigned int x)
-{
- int exp, half;
-
- exp = ((x >> 23) & 0xff) - 127;
- if (exp == 128 && (x & 0x7fffff) != 0)
- return x | 0x400000; /* NaN -> make it a QNaN */
- if (exp >= 23)
- return x; /* it's an integer already (or Inf) */
- if (exp < -1)
- return x & 0x80000000; /* |x| < 0.5 -> +/-0 */
- if (exp == -1)
- /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
- return (x & 0x80000000) | 0x3f800000;
- half = 0x400000 >> exp;
- /* add 0.5 to the magnitude and chop off the fraction bits */
- return (x + half) & ~(0x7fffff >> exp);
-}
-
-int emulate_altivec(struct pt_regs *regs)
-{
- unsigned int instr, i;
- unsigned int va, vb, vc, vd;
- vector128 *vrs;
-
- if (get_user(instr, (unsigned int __user *) regs->nip))
- return -EFAULT;
- if ((instr >> 26) != 4)
- return -EINVAL; /* not an altivec instruction */
- vd = (instr >> 21) & 0x1f;
- va = (instr >> 16) & 0x1f;
- vb = (instr >> 11) & 0x1f;
- vc = (instr >> 6) & 0x1f;
-
- vrs = current->thread.vr;
- switch (instr & 0x3f) {
- case 10:
- switch (vc) {
- case 0: /* vaddfp */
- vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
- break;
- case 1: /* vsubfp */
- vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
- break;
- case 4: /* vrefp */
- vrefp(&vrs[vd], &vrs[vb]);
- break;
- case 5: /* vrsqrtefp */
- vrsqrtefp(&vrs[vd], &vrs[vb]);
- break;
- case 6: /* vexptefp */
- for (i = 0; i < 4; ++i)
- vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
- break;
- case 7: /* vlogefp */
- for (i = 0; i < 4; ++i)
- vrs[vd].u[i] = elog2(vrs[vb].u[i]);
- break;
- case 8: /* vrfin */
- for (i = 0; i < 4; ++i)
- vrs[vd].u[i] = rfin(vrs[vb].u[i]);
- break;
- case 9: /* vrfiz */
- for (i = 0; i < 4; ++i)
- vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
- break;
- case 10: /* vrfip */
- for (i = 0; i < 4; ++i) {
- u32 x = vrs[vb].u[i];
- x = (x & 0x80000000)? rfiz(x): rfii(x);
- vrs[vd].u[i] = x;
- }
- break;
- case 11: /* vrfim */
- for (i = 0; i < 4; ++i) {
- u32 x = vrs[vb].u[i];
- x = (x & 0x80000000)? rfii(x): rfiz(x);
- vrs[vd].u[i] = x;
- }
- break;
- case 14: /* vctuxs */
- for (i = 0; i < 4; ++i)
- vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
- &current->thread.vscr.u[3]);
- break;
- case 15: /* vctsxs */
- for (i = 0; i < 4; ++i)
- vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
- &current->thread.vscr.u[3]);
- break;
- default:
- return -EINVAL;
- }
- break;
- case 46: /* vmaddfp */
- vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
- break;
- case 47: /* vnmsubfp */
- vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
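For reference (not part of the patch): every instruction emulated above shares primary opcode 4; the low six bits pick the family and the vC field acts as a sub-opcode (or as the UIMM scale for the convert instructions). A standalone sketch of that field slicing; the sample word is assembled from the same table, so it corresponds to vrfin v0,v0.

#include <stdio.h>

static void decode(unsigned int instr)
{
        printf("op=%u vD=%u vA=%u vB=%u vC=%u low6=%u\n",
               instr >> 26,             /* primary opcode, 4 for AltiVec */
               (instr >> 21) & 0x1f,    /* destination vector register */
               (instr >> 16) & 0x1f,    /* source A, or UIMM for the convert ops */
               (instr >> 11) & 0x1f,    /* source B */
               (instr >> 6) & 0x1f,     /* source C / sub-opcode */
               instr & 0x3f);           /* family value switched on above */
}

int main(void)
{
        decode(0x1000020au);    /* op=4, vC=8, low6=10 -> vrfin v0,v0 */
        return 0;
}
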
diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S
deleted file mode 100644
index 82a21346bf80..000000000000
--- a/arch/ppc/kernel/vector.S
+++ /dev/null
@@ -1,217 +0,0 @@
-#include <asm/ppc_asm.h>
-#include <asm/processor.h>
-
-/*
- * The routines below are in assembler so we can closely control the
- * usage of floating-point registers. These routines must be called
- * with preempt disabled.
- */
- .data
-fpzero:
- .long 0
-fpone:
- .long 0x3f800000 /* 1.0 in single-precision FP */
-fphalf:
- .long 0x3f000000 /* 0.5 in single-precision FP */
-
- .text
-/*
- * Internal routine to enable floating point and set FPSCR to 0.
- * Don't call it from C; it doesn't use the normal calling convention.
- */
-fpenable:
- mfmsr r10
- ori r11,r10,MSR_FP
- mtmsr r11
- isync
- stfd fr0,24(r1)
- stfd fr1,16(r1)
- stfd fr31,8(r1)
- lis r11,fpzero@ha
- mffs fr31
- lfs fr1,fpzero@l(r11)
- mtfsf 0xff,fr1
- blr
-
-fpdisable:
- mtfsf 0xff,fr31
- lfd fr31,8(r1)
- lfd fr1,16(r1)
- lfd fr0,24(r1)
- mtmsr r10
- isync
- blr
-
-/*
- * Vector add, floating point.
- */
- .globl vaddfp
-vaddfp:
- stwu r1,-32(r1)
- mflr r0
- stw r0,36(r1)
- bl fpenable
- li r0,4
- mtctr r0
- li r6,0
-1: lfsx fr0,r4,r6
- lfsx fr1,r5,r6
- fadds fr0,fr0,fr1
- stfsx fr0,r3,r6
- addi r6,r6,4
- bdnz 1b
- bl fpdisable
- lwz r0,36(r1)
- mtlr r0
- addi r1,r1,32
- blr
-
-/*
- * Vector subtract, floating point.
- */
- .globl vsubfp
-vsubfp:
- stwu r1,-32(r1)
- mflr r0
- stw r0,36(r1)
- bl fpenable
- li r0,4
- mtctr r0
- li r6,0
-1: lfsx fr0,r4,r6
- lfsx fr1,r5,r6
- fsubs fr0,fr0,fr1
- stfsx fr0,r3,r6
- addi r6,r6,4
- bdnz 1b
- bl fpdisable
- lwz r0,36(r1)
- mtlr r0
- addi r1,r1,32
- blr
-
-/*
- * Vector multiply and add, floating point.
- */
- .globl vmaddfp
-vmaddfp:
- stwu r1,-48(r1)
- mflr r0
- stw r0,52(r1)
- bl fpenable
- stfd fr2,32(r1)
- li r0,4
- mtctr r0
- li r7,0
-1: lfsx fr0,r4,r7
- lfsx fr1,r5,r7
- lfsx fr2,r6,r7
- fmadds fr0,fr0,fr2,fr1
- stfsx fr0,r3,r7
- addi r7,r7,4
- bdnz 1b
- lfd fr2,32(r1)
- bl fpdisable
- lwz r0,52(r1)
- mtlr r0
- addi r1,r1,48
- blr
-
-/*
- * Vector negative multiply and subtract, floating point.
- */
- .globl vnmsubfp
-vnmsubfp:
- stwu r1,-48(r1)
- mflr r0
- stw r0,52(r1)
- bl fpenable
- stfd fr2,32(r1)
- li r0,4
- mtctr r0
- li r7,0
-1: lfsx fr0,r4,r7
- lfsx fr1,r5,r7
- lfsx fr2,r6,r7
- fnmsubs fr0,fr0,fr2,fr1
- stfsx fr0,r3,r7
- addi r7,r7,4
- bdnz 1b
- lfd fr2,32(r1)
- bl fpdisable
- lwz r0,52(r1)
- mtlr r0
- addi r1,r1,48
- blr
-
-/*
- * Vector reciprocal estimate. We just compute 1.0/x.
- * r3 -> destination, r4 -> source.
- */
- .globl vrefp
-vrefp:
- stwu r1,-32(r1)
- mflr r0
- stw r0,36(r1)
- bl fpenable
- lis r9,fpone@ha
- li r0,4
- lfs fr1,fpone@l(r9)
- mtctr r0
- li r6,0
-1: lfsx fr0,r4,r6
- fdivs fr0,fr1,fr0
- stfsx fr0,r3,r6
- addi r6,r6,4
- bdnz 1b
- bl fpdisable
- lwz r0,36(r1)
- mtlr r0
- addi r1,r1,32
- blr
-
-/*
- * Vector reciprocal square-root estimate, floating point.
- * We use the frsqrte instruction for the initial estimate followed
- * by 2 iterations of Newton-Raphson to get sufficient accuracy.
- * r3 -> destination, r4 -> source.
- */
- .globl vrsqrtefp
-vrsqrtefp:
- stwu r1,-64(r1)
- mflr r0
- stw r0,68(r1)
- bl fpenable
- stfd fr2,32(r1)
- stfd fr3,40(r1)
- stfd fr4,48(r1)
- stfd fr5,56(r1)
- lis r9,fpone@ha
- lis r8,fphalf@ha
- li r0,4
- lfs fr4,fpone@l(r9)
- lfs fr5,fphalf@l(r8)
- mtctr r0
- li r6,0
-1: lfsx fr0,r4,r6
- frsqrte fr1,fr0 /* r = frsqrte(s) */
- fmuls fr3,fr1,fr0 /* r * s */
- fmuls fr2,fr1,fr5 /* r * 0.5 */
- fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
- fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
- fmuls fr3,fr1,fr0 /* r * s */
- fmuls fr2,fr1,fr5 /* r * 0.5 */
- fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
- fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
- stfsx fr1,r3,r6
- addi r6,r6,4
- bdnz 1b
- lfd fr5,56(r1)
- lfd fr4,48(r1)
- lfd fr3,40(r1)
- lfd fr2,32(r1)
- bl fpdisable
- lwz r0,68(r1)
- mtlr r0
- addi r1,r1,64
- blr
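A quick aside (not part of the patch): the loop above refines the hardware frsqrte estimate with two Newton-Raphson steps, each computing r' = r + 0.5*r*(1 - s*r*r), which roughly doubles the number of correct mantissa bits. The same arithmetic in standalone C; the starting estimate is an arbitrary stand-in for frsqrte's output.

#include <stdio.h>

static float refine(float s, float r)
{
        return r + 0.5f * r * (1.0f - s * r * r);       /* one Newton-Raphson step */
}

int main(void)
{
        float s = 2.0f;         /* want 1/sqrt(2) */
        float r = 0.7f;         /* crude initial guess, standing in for frsqrte */

        r = refine(s, r);
        r = refine(s, r);       /* two steps, matching the assembly */
        printf("1/sqrt(2) ~= %.7f\n", r);
        return 0;
}
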
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 17d2db7e537d..09c6525cfa61 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -149,32 +149,6 @@ SECTIONS
. = ALIGN(4096);
_sextratext = .;
- __pmac_begin = .;
- .pmac.text : { *(.pmac.text) }
- .pmac.data : { *(.pmac.data) }
- . = ALIGN(4096);
- __pmac_end = .;
-
- . = ALIGN(4096);
- __prep_begin = .;
- .prep.text : { *(.prep.text) }
- .prep.data : { *(.prep.data) }
- . = ALIGN(4096);
- __prep_end = .;
-
- . = ALIGN(4096);
- __chrp_begin = .;
- .chrp.text : { *(.chrp.text) }
- .chrp.data : { *(.chrp.data) }
- . = ALIGN(4096);
- __chrp_end = .;
-
- . = ALIGN(4096);
- __openfirmware_begin = .;
- .openfirmware.text : { *(.openfirmware.text) }
- .openfirmware.data : { *(.openfirmware.data) }
- . = ALIGN(4096);
- __openfirmware_end = .;
_eextratext = .;
__bss_start = .;