Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c | 5
-rw-r--r--  arch/mips/Kconfig | 6
-rw-r--r--  arch/mips/emma2rh/markeins/setup.c | 24
-rw-r--r--  arch/mips/kernel/linux32.c | 10
-rw-r--r--  arch/mips/kernel/r4k_switch.S | 2
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/mips/kernel/traps.c | 12
-rw-r--r--  arch/mips/mips-boards/atlas/atlas_setup.c | 2
-rw-r--r--  arch/mips/mips-boards/generic/display.c | 24
-rw-r--r--  arch/mips/mips-boards/generic/time.c | 31
-rw-r--r--  arch/mips/mips-boards/malta/malta_setup.c | 6
-rw-r--r--  arch/mips/mips-boards/sead/sead_setup.c | 2
-rw-r--r--  arch/mips/mm/dma-default.c | 5
-rw-r--r--  arch/mips/qemu/q-irq.c | 2
-rw-r--r--  arch/mips/sni/pcimt.c | 18
-rw-r--r--  arch/mips/sni/setup.c | 33
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/boot/Makefile | 22
-rw-r--r--  arch/powerpc/boot/crt0.S | 1
-rwxr-xr-x  arch/powerpc/boot/wrapper | 4
-rw-r--r--  arch/powerpc/kernel/irq.c | 6
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 2
-rw-r--r--  arch/powerpc/kernel/prom.c | 11
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c | 33
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 57
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 62
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 161
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 2
-rw-r--r--  arch/powerpc/platforms/celleb/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/idle.c | 1
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 8
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 10
-rw-r--r--  arch/ppc/syslib/ibm_ocp.c | 1
-rw-r--r--  arch/sh/boards/se/73180/setup.c | 4
-rw-r--r--  arch/sh/boards/superh/microdev/irq.c | 1
-rw-r--r--  arch/sh/cchips/voyagergx/irq.c | 13
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4-202.c | 2
-rw-r--r--  arch/sh/mm/ioremap.c | 1
-rw-r--r--  arch/sparc64/Kconfig | 9
-rw-r--r--  arch/sparc64/kernel/Makefile | 4
-rw-r--r--  arch/sparc64/kernel/entry.S | 62
-rw-r--r--  arch/sparc64/kernel/mdesc.c | 53
-rw-r--r--  arch/sparc64/kernel/prom.c | 5
-rw-r--r--  arch/sparc64/kernel/setup.c | 19
-rw-r--r--  arch/sparc64/kernel/smp.c | 21
-rw-r--r--  arch/sparc64/kernel/sysfs.c | 297
51 files changed, 757 insertions, 313 deletions
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 1cf466df330a..7202b98aac4f 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -734,10 +734,13 @@ void mtrr_ap_init(void)
*/
void mtrr_save_state(void)
{
- if (smp_processor_id() == 0)
+ int cpu = get_cpu();
+
+ if (cpu == 0)
mtrr_save_fixed_ranges(NULL);
else
smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
+ put_cpu();
}
static int __init mtrr_init_finialize(void)
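
The mtrr_save_state() hunk above replaces a bare smp_processor_id() test with get_cpu()/put_cpu(), so the CPU number stays valid (preemption disabled) for the duration of the check. A minimal sketch of that pattern in kernel-module context follows; example_run_on_boot_cpu() and remote_work_fn() are illustrative names, not part of the patch:

#include <linux/smp.h>

static void remote_work_fn(void *info)
{
	/* work that must run on CPU 0 */
}

static void example_run_on_boot_cpu(void)
{
	int cpu = get_cpu();	/* disables preemption; 'cpu' cannot change under us */

	if (cpu == 0)
		remote_work_fn(NULL);
	else
		/* 5-argument form used by kernels of this era, as in the hunk above */
		smp_call_function_single(0, remote_work_fn, NULL, 1, 1);

	put_cpu();		/* re-enables preemption */
}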
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0f09412e1b7f..9528ee90640a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -747,9 +747,9 @@ config EARLY_PRINTK
to print messages very early in the bootup process.
This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized. For normal operation
- it is not recommended because it looks on some machines ugly and
- oesn't cooperate with an X server. You should normally N here,
+ early before the console code is initialized. For normal operation,
+ it is not recommended because it looks ugly on some machines and
+ doesn't cooperate with an X server. You should normally say N here,
unless you want to debug such a crash.
config SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/emma2rh/markeins/setup.c b/arch/mips/emma2rh/markeins/setup.c
index b29a44739230..2f060e1ed36c 100644
--- a/arch/mips/emma2rh/markeins/setup.c
+++ b/arch/mips/emma2rh/markeins/setup.c
@@ -115,30 +115,6 @@ extern void markeins_irq_setup(void);
static void inline __init markeins_sio_setup(void)
{
-#ifdef CONFIG_KGDB_8250
- struct uart_port emma_port;
-
- memset(&emma_port, 0, sizeof(emma_port));
-
- emma_port.flags =
- UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
- emma_port.iotype = UPIO_MEM;
- emma_port.regshift = 4; /* I/O addresses are every 8 bytes */
- emma_port.uartclk = 18544000; /* Clock rate of the chip */
-
- emma_port.line = 0;
- emma_port.mapbase = KSEG1ADDR(EMMA2RH_PFUR0_BASE + 3);
- emma_port.membase = (u8*)emma_port.mapbase;
- early_serial_setup(&emma_port);
-
- emma_port.line = 1;
- emma_port.mapbase = KSEG1ADDR(EMMA2RH_PFUR1_BASE + 3);
- emma_port.membase = (u8*)emma_port.mapbase;
- early_serial_setup(&emma_port);
-
- emma_port.irq = EMMA2RH_IRQ_PFUR1;
- kgdb8250_add_port(1, &emma_port);
-#endif
}
void __init plat_mem_setup(void)
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 37849edd0645..06e04da211d5 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -556,6 +556,16 @@ asmlinkage long sys32_sync_file_range(int fd, int __pad,
flags);
}
+asmlinkage long sys32_fadvise64_64(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags)
+{
+ return sys_fadvise64_64(fd,
+ merge_64(a2, a3), merge_64(a4, a5),
+ flags);
+}
+
save_static_function(sys32_clone);
__attribute_used__ noinline static int
_sys32_clone(nabi_no_regargs struct pt_regs regs)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index cc566cf12246..06729596812f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -174,7 +174,7 @@ LEAF(_init_fpu)
or t0, t1
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
- fpu_enable_hazard
+ enable_fpu_hazard
li t1, FPU_DEFAULT
ctc1 t1, fcr31
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 6eac28337423..1631035ffc24 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -299,7 +299,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
- PTR sys32_readahead
+ PTR sys_readahead
PTR sys_setxattr /* 6180 */
PTR sys_lsetxattr
PTR sys_fsetxattr
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 7e74b412a782..2aa99426ac1c 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -459,7 +459,7 @@ sys_call_table:
PTR sys_remap_file_pages
PTR sys_set_tid_address
PTR sys_restart_syscall
- PTR sys_fadvise64_64
+ PTR sys32_fadvise64_64
PTR compat_sys_statfs64 /* 4255 */
PTR compat_sys_fstatfs64
PTR compat_sys_timer_create
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 200de027f354..3f58b6ac1358 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -927,12 +927,6 @@ asmlinkage void do_reserved(struct pt_regs *regs)
(regs->cp0_cause & 0x7f) >> 2);
}
-static asmlinkage void do_default_vi(void)
-{
- show_regs(get_irq_regs());
- panic("Caught unexpected vectored interrupt.");
-}
-
/*
* Some MIPS CPUs can enable/disable for cache parity detection, but do
* it different ways.
@@ -1128,6 +1122,12 @@ void mips_srs_free(int set)
clear_bit(set, &sr->sr_allocated);
}
+static asmlinkage void do_default_vi(void)
+{
+ show_regs(get_irq_regs());
+ panic("Caught unexpected vectored interrupt.");
+}
+
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
unsigned long handler;
diff --git a/arch/mips/mips-boards/atlas/atlas_setup.c b/arch/mips/mips-boards/atlas/atlas_setup.c
index 0c6b0ce15028..1cc6ebbedfdd 100644
--- a/arch/mips/mips-boards/atlas/atlas_setup.c
+++ b/arch/mips/mips-boards/atlas/atlas_setup.c
@@ -48,6 +48,8 @@ const char *get_system_type(void)
return "MIPS Atlas";
}
+const char display_string[] = " LINUX ON ATLAS ";
+
void __init plat_mem_setup(void)
{
mips_pcibios_init();
diff --git a/arch/mips/mips-boards/generic/display.c b/arch/mips/mips-boards/generic/display.c
index 548dbe5ce7c8..5d600054090a 100644
--- a/arch/mips/mips-boards/generic/display.c
+++ b/arch/mips/mips-boards/generic/display.c
@@ -19,9 +19,14 @@
*/
#include <linux/compiler.h>
+#include <linux/timer.h>
#include <asm/io.h>
#include <asm/mips-boards/generic.h>
+extern const char display_string[];
+static unsigned int display_count;
+static unsigned int max_display_count;
+
void mips_display_message(const char *str)
{
static unsigned int __iomem *display = NULL;
@@ -37,3 +42,22 @@ void mips_display_message(const char *str)
writel(' ', display + i);
}
}
+
+static void scroll_display_message(unsigned long data);
+static DEFINE_TIMER(mips_scroll_timer, scroll_display_message, HZ, 0);
+
+static void scroll_display_message(unsigned long data)
+{
+ mips_display_message(&display_string[display_count++]);
+ if (display_count == max_display_count)
+ display_count = 0;
+
+ mod_timer(&mips_scroll_timer, jiffies + HZ);
+}
+
+void mips_scroll_message(void)
+{
+ del_timer_sync(&mips_scroll_timer);
+ max_display_count = strlen(display_string) + 1 - 8;
+ mod_timer(&mips_scroll_timer, jiffies + 1);
+}
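
The new scroll code above moves LCD message scrolling out of the timer interrupt path (see the generic/time.c hunks that follow) into an ordinary kernel timer that re-arms itself once per second. A minimal sketch of that DEFINE_TIMER()/mod_timer() pattern as used in this kernel generation; periodic_tick() and example_timer are illustrative names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void periodic_tick(unsigned long data);
static DEFINE_TIMER(example_timer, periodic_tick, 0, 0);

static void periodic_tick(unsigned long data)
{
	/* do the periodic work, then re-arm one second from now */
	mod_timer(&example_timer, jiffies + HZ);
}

static void example_start(void)
{
	/* first expiry almost immediately; periodic_tick() keeps it going */
	mod_timer(&example_timer, jiffies + 1);
}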
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index df2a2bd3aa5d..37735bfc3afd 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -53,37 +53,11 @@
unsigned long cpu_khz;
-#if defined(CONFIG_MIPS_ATLAS)
-static char display_string[] = " LINUX ON ATLAS ";
-#endif
-#if defined(CONFIG_MIPS_MALTA)
-#if defined(CONFIG_MIPS_MT_SMTC)
-static char display_string[] = " SMTC LINUX ON MALTA ";
-#else
-static char display_string[] = " LINUX ON MALTA ";
-#endif /* CONFIG_MIPS_MT_SMTC */
-#endif
-#if defined(CONFIG_MIPS_SEAD)
-static char display_string[] = " LINUX ON SEAD ";
-#endif
-static unsigned int display_count;
-#define MAX_DISPLAY_COUNT (sizeof(display_string) - 8)
-
#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
-static unsigned int timer_tick_count;
static int mips_cpu_timer_irq;
extern void smtc_timer_broadcast(int);
-static inline void scroll_display_message(void)
-{
- if ((timer_tick_count++ % HZ) == 0) {
- mips_display_message(&display_string[display_count++]);
- if (display_count == MAX_DISPLAY_COUNT)
- display_count = 0;
- }
-}
-
static void mips_timer_dispatch(void)
{
do_IRQ(mips_cpu_timer_irq);
@@ -143,7 +117,6 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
if (cpu_data[cpu].vpe_id == 0) {
timer_interrupt(irq, NULL);
smtc_timer_broadcast(cpu_data[cpu].vpe_id);
- scroll_display_message();
} else {
write_c0_compare(read_c0_count() +
(mips_hpt_frequency/HZ));
@@ -167,8 +140,6 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
/* we keep interrupt disabled all the time */
if (!r2 || (read_c0_cause() & (1 << 30)))
timer_interrupt(irq, NULL);
-
- scroll_display_message();
} else {
/* Everyone else needs to reset the timer int here as
ll_local_timer_interrupt doesn't */
@@ -262,6 +233,8 @@ void __init mips_time_init(void)
(est_freq%1000000)*100/1000000);
cpu_khz = est_freq / 1000;
+
+ mips_scroll_message();
}
void __init plat_timer_setup(struct irqaction *irq)
diff --git a/arch/mips/mips-boards/malta/malta_setup.c b/arch/mips/mips-boards/malta/malta_setup.c
index 7873932532a1..c14b7bf89950 100644
--- a/arch/mips/mips-boards/malta/malta_setup.c
+++ b/arch/mips/mips-boards/malta/malta_setup.c
@@ -56,6 +56,12 @@ const char *get_system_type(void)
return "MIPS Malta";
}
+#if defined(CONFIG_MIPS_MT_SMTC)
+const char display_string[] = " SMTC LINUX ON MALTA ";
+#else
+const char display_string[] = " LINUX ON MALTA ";
+#endif /* CONFIG_MIPS_MT_SMTC */
+
#ifdef CONFIG_BLK_DEV_FD
void __init fd_activate(void)
{
diff --git a/arch/mips/mips-boards/sead/sead_setup.c b/arch/mips/mips-boards/sead/sead_setup.c
index a189dec7c7bc..811aba100605 100644
--- a/arch/mips/mips-boards/sead/sead_setup.c
+++ b/arch/mips/mips-boards/sead/sead_setup.c
@@ -43,6 +43,8 @@ const char *get_system_type(void)
return "MIPS SEAD";
}
+const char display_string[] = " LINUX ON SEAD ";
+
void __init plat_mem_setup(void)
{
ioport_resource.end = 0x7fffffff;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f0eb29917d9a..76903c727647 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -168,8 +168,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
addr = (unsigned long) page_address(sg->page);
if (!plat_device_is_coherent(dev) && addr)
__dma_sync(addr + sg->offset, sg->length, direction);
- sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
- sg->offset;
+ sg->dma_address = plat_map_dma_mem(dev,
+ (void *)(addr + sg->offset),
+ sg->length);
}
return nents;
diff --git a/arch/mips/qemu/q-irq.c b/arch/mips/qemu/q-irq.c
index f5ea2fe10f14..89891e984b3b 100644
--- a/arch/mips/qemu/q-irq.c
+++ b/arch/mips/qemu/q-irq.c
@@ -7,8 +7,6 @@
#include <asm/system.h>
#include <asm/time.h>
-extern asmlinkage void qemu_handle_int(void);
-
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause();
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c
index 9ee208daa8b1..97b234361b4d 100644
--- a/arch/mips/sni/pcimt.c
+++ b/arch/mips/sni/pcimt.c
@@ -6,7 +6,7 @@
* for more details.
*
* Copyright (C) 1996, 97, 98, 2000, 03, 04, 06 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
#include <linux/init.h>
@@ -131,6 +131,19 @@ static struct resource pcimt_io_resources[] = {
}
};
+static struct resource pcimt_mem_resources[] = {
+ {
+ /*
+ * this region should only be 4 bytes long,
+ * but it's 16MB on all RM300C I've checked
+ */
+ .start = 0x1a000000,
+ .end = 0x1affffff,
+ .name = "PCI INT ACK",
+ .flags = IORESOURCE_BUSY
+ }
+};
+
static struct resource sni_mem_resource = {
.start = 0x18000000UL,
.end = 0x1fbfffffUL,
@@ -145,6 +158,9 @@ static void __init sni_pcimt_resource_init(void)
/* request I/O space for devices used on all i[345]86 PCs */
for (i = 0; i < ARRAY_SIZE(pcimt_io_resources); i++)
request_resource(&sni_io_resource, pcimt_io_resources + i);
+ /* request MEM space for devices used on all i[345]86 PCs */
+ for (i = 0; i < ARRAY_SIZE(pcimt_mem_resources); i++)
+ request_resource(&sni_mem_resource, pcimt_mem_resources + i);
}
extern struct pci_ops sni_pcimt_ops;
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 68d7cf609b4f..4fedfbda0c79 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -6,7 +6,7 @@
* for more details.
*
* Copyright (C) 1996, 97, 98, 2000, 03, 04, 06 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
#include <linux/eisa.h>
#include <linux/init.h>
@@ -92,3 +92,34 @@ void __init plat_mem_setup(void)
sni_display_setup();
}
+
+#if CONFIG_PCI
+
+#include <linux/pci.h>
+#include <video/vga.h>
+#include <video/cirrus.h>
+
+static void __devinit quirk_cirrus_ram_size(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ /*
+ * firmware doesn't set the ram size correct, so we
+ * need to do it here, otherwise we get screen corruption
+ * on older Cirrus chips
+ */
+ pci_read_config_word (dev, PCI_COMMAND, &cmd);
+ if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY))
+ == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
+ vga_wseq (NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
+ vga_wseq (NULL, CL_SEQRF, 0x18);
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8,
+ quirk_cirrus_ram_size);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436,
+ quirk_cirrus_ram_size);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
+ quirk_cirrus_ram_size);
+#endif
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 6238b5875fd1..fbafd965dcd2 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -142,7 +142,6 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
# Default to zImage, override when needed
defaultimage-y := zImage
-defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
defaultimage-$(CONFIG_DEFAULT_UIMAGE) := uImage
KBUILD_IMAGE := $(defaultimage-y)
all: $(KBUILD_IMAGE)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 83788986b93b..ff2701949ee1 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -11,20 +11,18 @@
# bootloader and increase compatibility with OpenFirmware.
#
# To this end we need to define BOOTCC, etc, as the tools
-# needed to build the 32 bit image. These are normally HOSTCC,
-# but may be a third compiler if, for example, you are cross
-# compiling from an intel box. Once the 64bit ppc gcc is
-# stable it will probably simply be a compiler switch to
-# compile for 32bit mode.
+# needed to build the 32 bit image. That's normally the same
+# compiler for the rest of the kernel, with the -m32 flag added.
# To make it easier to setup a cross compiler,
# CROSS32_COMPILE is setup as a prefix just like CROSS_COMPILE
# in the toplevel makefile.
all: $(obj)/zImage
-HOSTCC := gcc
-BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem \
- $(shell $(CROSS32CC) -print-file-name=include) -fPIC
+BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -Os -msoft-float -pipe \
+ -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
+ -isystem $(shell $(CROSS32CC) -print-file-name=include)
BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
ifeq ($(call cc-option-yn, -fstack-protector),y)
@@ -33,8 +31,8 @@ endif
BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj)
-$(obj)/44x.o: BOOTCFLAGS += -Wa,-mbooke
-$(obj)/ebony.o: BOOTCFLAGS += -Wa,-mbooke
+$(obj)/44x.o: BOOTCFLAGS += -mcpu=440
+$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
zlib := inffast.c inflate.c inftrees.c
zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h
@@ -136,6 +134,7 @@ image-$(CONFIG_PPC_EFIKA) += zImage.chrp
image-$(CONFIG_PPC_PMAC) += zImage.pmac
image-$(CONFIG_PPC_HOLLY) += zImage.holly-elf
image-$(CONFIG_PPC_PRPMC2800) += zImage.prpmc2800
+image-$(CONFIG_PPC_ISERIES) += zImage.iseries
image-$(CONFIG_DEFAULT_UIMAGE) += uImage
ifneq ($(CONFIG_DEVICE_TREE),"")
@@ -185,6 +184,9 @@ $(obj)/zImage.initrd.%: vmlinux $(wrapperbits)
$(obj)/zImage.%: vmlinux $(wrapperbits)
$(call if_changed,wrap,$*)
+$(obj)/zImage.iseries: vmlinux
+ $(STRIP) -s -R .comment $< -o $@
+
$(obj)/zImage.ps3: vmlinux
$(STRIP) -s -R .comment $< -o $@
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 5a4215c4b014..f1c4dfc635be 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -13,6 +13,7 @@
.text
/* a procedure descriptor used when booting this as a COFF file */
+ .globl _zimage_start_opd
_zimage_start_opd:
.long _zimage_start, 0, 0, 0
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 2ed8b8b3f0ec..da77adc73078 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -129,7 +129,7 @@ case "$platform" in
pmac|pseries|chrp)
platformo=$object/of.o
;;
-pmaccoff)
+coff)
platformo=$object/of.o
lds=$object/zImage.coff.lds
;;
@@ -220,7 +220,7 @@ case "$platform" in
pseries|chrp)
$object/addnote "$ofile"
;;
-pmaccoff)
+coff)
${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile"
$object/hack-coff "$ofile"
;;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 068377a2a8dc..42c8ed6ed528 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -489,7 +489,7 @@ struct irq_host *irq_alloc_host(unsigned int revmap_type,
case IRQ_HOST_MAP_LINEAR:
rmap = (unsigned int *)(host + 1);
for (i = 0; i < revmap_arg; i++)
- rmap[i] = IRQ_NONE;
+ rmap[i] = NO_IRQ;
host->revmap_data.linear.size = revmap_arg;
smp_wmb();
host->revmap_data.linear.revmap = rmap;
@@ -614,7 +614,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
* host->ops->map() to update the flags
*/
virq = irq_find_mapping(host, hwirq);
- if (virq != IRQ_NONE) {
+ if (virq != NO_IRQ) {
if (host->ops->remap)
host->ops->remap(host, virq, hwirq);
pr_debug("irq: -> existing mapping on virq %d\n", virq);
@@ -741,7 +741,7 @@ void irq_dispose_mapping(unsigned int virq)
switch(host->revmap_type) {
case IRQ_HOST_MAP_LINEAR:
if (hwirq < host->revmap_data.linear.size)
- host->revmap_data.linear.revmap[hwirq] = IRQ_NONE;
+ host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
/* Check if radix tree allocated yet */
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index d501c23e5159..d454f61c9c7c 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -433,7 +433,7 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
* Note also that we don't do ISA, this will also be fixed with a
* more massive rework.
*/
- pci_setup_phb_io(phb, 0);
+ pci_setup_phb_io(phb, pci_io_base == 0);
/* Init pci_dn data structures */
pci_devs_phb_init_dynamic(phb);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 066a6a7a25b8..af42ddab3ab4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1171,11 +1171,12 @@ EXPORT_SYMBOL(of_find_node_by_name);
/**
* of_find_node_by_type - Find a node by its "device_type" property
- * @from: The node to start searching from or NULL, the node
- * you pass will not be searched, only the next one
- * will; typically, you pass what the previous call
- * returned. of_node_put() will be called on it
- * @name: The type string to match against
+ * @from: The node to start searching from, or NULL to start searching
+ * the entire device tree. The node you pass will not be
+ * searched, only the next one will; typically, you pass
+ * what the previous call returned. of_node_put() will be
+ * called on from for you.
+ * @type: The type string to match against
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
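
The corrected kernel-doc above spells out the iteration convention: pass the previous return value back in as @from, and the helper drops that node's reference itself while returning the next match with its refcount held. A minimal usage sketch under those rules; "cpu" is just an example type string and example_walk_cpu_nodes() is an illustrative name:

#include <linux/kernel.h>
#include <asm/prom.h>

static void example_walk_cpu_nodes(void)
{
	struct device_node *dp = NULL;

	while ((dp = of_find_node_by_type(dp, "cpu")) != NULL) {
		/* dp is returned with a reference held; the node passed
		 * in as @from has already been put for us */
		printk(KERN_INFO "found cpu node %s\n", dp->full_name);
	}
	/* if the loop were exited early, of_node_put(dp) would be needed */
}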
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index f4f391cdd8f5..bf76562167c3 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -218,6 +218,7 @@ set_single_step(struct task_struct *task)
regs->msr |= MSR_SE;
#endif
}
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
static inline void
@@ -233,6 +234,7 @@ clear_single_step(struct task_struct *task)
regs->msr &= ~MSR_SE;
#endif
}
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index f9ac3fe3be97..ac445998d831 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -67,6 +67,7 @@ static u64 MIC_Slow_Next_Timer_table[] = {
0x00003FC000000000ull,
};
+static unsigned int pmi_frequency_limit = 0;
/*
* hardware specific functions
*/
@@ -164,7 +165,6 @@ static int set_pmode(int cpu, unsigned int slow_mode) {
static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
{
- struct cpufreq_policy policy;
u8 cpu;
u8 cbe_pmode_new;
@@ -173,15 +173,27 @@ static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
cpu = cbe_node_to_cpu(pmi_msg.data1);
cbe_pmode_new = pmi_msg.data2;
- cpufreq_get_policy(&policy, cpu);
+ pmi_frequency_limit = cbe_freqs[cbe_pmode_new].frequency;
- policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency);
- policy.min = min(policy.min, policy.max);
+ pr_debug("cbe_handle_pmi: max freq=%d\n", pmi_frequency_limit);
+}
+
+static int pmi_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
- pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max);
- cpufreq_set_policy(&policy);
+ if (event != CPUFREQ_INCOMPATIBLE)
+ return 0;
+
+ cpufreq_verify_within_limits(policy, 0, pmi_frequency_limit);
+ return 0;
}
+static struct notifier_block pmi_notifier_block = {
+ .notifier_call = pmi_notifier,
+};
+
static struct pmi_handler cbe_pmi_handler = {
.type = PMI_TYPE_FREQ_CHANGE,
.handle_pmi_message = cbe_cpufreq_handle_pmi,
@@ -238,12 +250,21 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
+ if (pmi_dev) {
+ /* frequency might get limited later, initialize limit with max_freq */
+ pmi_frequency_limit = max_freq;
+ cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+ }
+
/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
}
static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
+ if (pmi_dev)
+ cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
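
The rework above stops rewriting the cpufreq policy from inside the PMI handler; the handler only records the firmware limit, and a CPUFREQ_POLICY_NOTIFIER clamps any policy that is subsequently (re)evaluated. A condensed sketch of that notifier pattern; limit_khz and the example_* names are illustrative:

#include <linux/cpufreq.h>
#include <linux/notifier.h>

static unsigned int limit_khz = ~0U;	/* updated elsewhere, e.g. from firmware */

static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	/* pull policy->max (and min) down into [0, limit_khz] */
	cpufreq_verify_within_limits(policy, 0, limit_khz);
	return 0;
}

static struct notifier_block example_nb = {
	.notifier_call = example_policy_notifier,
};

/* paired with the driver's init/exit paths:
 *   cpufreq_register_notifier(&example_nb, CPUFREQ_POLICY_NOTIFIER);
 *   cpufreq_unregister_notifier(&example_nb, CPUFREQ_POLICY_NOTIFIER);
 */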
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 8654749e317b..7c51cb54bca1 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -39,7 +39,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
if (spu_init_csa(&ctx->csa))
goto out_free;
spin_lock_init(&ctx->mmio_lock);
- spin_lock_init(&ctx->mapping_lock);
+ mutex_init(&ctx->mapping_lock);
kref_init(&ctx->kref);
mutex_init(&ctx->state_mutex);
mutex_init(&ctx->run_mutex);
@@ -103,6 +103,7 @@ void spu_forget(struct spu_context *ctx)
void spu_unmap_mappings(struct spu_context *ctx)
{
+ mutex_lock(&ctx->mapping_lock);
if (ctx->local_store)
unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
if (ctx->mfc)
@@ -117,6 +118,7 @@ void spu_unmap_mappings(struct spu_context *ctx)
unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
if (ctx->psmap)
unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
+ mutex_unlock(&ctx->mapping_lock);
}
/**
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 45614c73c784..b1e7e2f8a2e9 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -45,11 +45,11 @@ spufs_mem_open(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
file->private_data = ctx;
if (!i->i_openers++)
ctx->local_store = inode->i_mapping;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
@@ -59,10 +59,10 @@ spufs_mem_release(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!--i->i_openers)
ctx->local_store = NULL;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
@@ -217,6 +217,7 @@ unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
static const struct file_operations spufs_mem_fops = {
.open = spufs_mem_open,
+ .release = spufs_mem_release,
.read = spufs_mem_read,
.write = spufs_mem_write,
.llseek = generic_file_llseek,
@@ -309,11 +310,11 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
file->private_data = ctx;
if (!i->i_openers++)
ctx->cntl = inode->i_mapping;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return simple_attr_open(inode, file, spufs_cntl_get,
spufs_cntl_set, "0x%08lx");
}
@@ -326,10 +327,10 @@ spufs_cntl_release(struct inode *inode, struct file *file)
simple_attr_close(inode, file);
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!--i->i_openers)
ctx->cntl = NULL;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
@@ -812,11 +813,11 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
file->private_data = ctx;
if (!i->i_openers++)
ctx->signal1 = inode->i_mapping;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return nonseekable_open(inode, file);
}
@@ -826,10 +827,10 @@ spufs_signal1_release(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!--i->i_openers)
ctx->signal1 = NULL;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
@@ -936,11 +937,11 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
file->private_data = ctx;
if (!i->i_openers++)
ctx->signal2 = inode->i_mapping;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return nonseekable_open(inode, file);
}
@@ -950,10 +951,10 @@ spufs_signal2_release(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!--i->i_openers)
ctx->signal2 = NULL;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
@@ -1154,10 +1155,10 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
file->private_data = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!i->i_openers++)
ctx->mss = inode->i_mapping;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return nonseekable_open(inode, file);
}
@@ -1167,10 +1168,10 @@ spufs_mss_release(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!--i->i_openers)
ctx->mss = NULL;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
@@ -1211,11 +1212,11 @@ static int spufs_psmap_open(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
file->private_data = i->i_ctx;
if (!i->i_openers++)
ctx->psmap = inode->i_mapping;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return nonseekable_open(inode, file);
}
@@ -1225,10 +1226,10 @@ spufs_psmap_release(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!--i->i_openers)
ctx->psmap = NULL;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
@@ -1281,11 +1282,11 @@ static int spufs_mfc_open(struct inode *inode, struct file *file)
if (atomic_read(&inode->i_count) != 1)
return -EBUSY;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
file->private_data = ctx;
if (!i->i_openers++)
ctx->mfc = inode->i_mapping;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return nonseekable_open(inode, file);
}
@@ -1295,10 +1296,10 @@ spufs_mfc_release(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
struct spu_context *ctx = i->i_ctx;
- spin_lock(&ctx->mapping_lock);
+ mutex_lock(&ctx->mapping_lock);
if (!--i->i_openers)
ctx->mfc = NULL;
- spin_unlock(&ctx->mapping_lock);
+ mutex_unlock(&ctx->mapping_lock);
return 0;
}
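
Every open/release pair above follows the same scheme: the first opener publishes inode->i_mapping into the context and the last closer clears it, all under ctx->mapping_lock, which this patch converts from a spinlock to a mutex. A minimal sketch of that refcounted-mapping pattern; struct example_ctx and the example_* functions are illustrative, not spufs API:

#include <linux/mutex.h>
#include <linux/fs.h>

struct example_ctx {
	struct mutex mapping_lock;	/* guards 'area' and 'openers' */
	struct address_space *area;	/* valid while at least one file is open */
	int openers;
};

static int example_open(struct inode *inode, struct example_ctx *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (!ctx->openers++)		/* first opener publishes the mapping */
		ctx->area = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int example_release(struct inode *inode, struct example_ctx *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (!--ctx->openers)		/* last closer withdraws it */
		ctx->area = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}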
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 7150730e2ff1..9807206e0219 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -177,7 +177,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir)
static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
int mode, struct spu_context *ctx)
{
- struct dentry *dentry;
+ struct dentry *dentry, *tmp;
int ret;
while (files->name && files->name[0]) {
@@ -193,7 +193,20 @@ static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
}
return 0;
out:
- spufs_prune_dir(dir);
+ /*
+ * remove all children from dir. dir->inode is not set so don't
+ * just simply use spufs_prune_dir() and panic afterwards :)
+ * dput() looks like it will do the right thing:
+ * - dec parent's ref counter
+ * - remove child from parent's child list
+ * - free child's inode if possible
+ * - free child
+ */
+ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+ dput(dentry);
+ }
+
+ shrink_dcache_parent(dir);
return ret;
}
@@ -274,6 +287,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
goto out;
out_free_ctx:
+ spu_forget(ctx);
put_spu_context(ctx);
out_iput:
iput(inode);
@@ -349,37 +363,6 @@ out:
return ret;
}
-static int spufs_rmgang(struct inode *root, struct dentry *dir)
-{
- /* FIXME: this fails if the dir is not empty,
- which causes a leak of gangs. */
- return simple_rmdir(root, dir);
-}
-
-static int spufs_gang_close(struct inode *inode, struct file *file)
-{
- struct inode *parent;
- struct dentry *dir;
- int ret;
-
- dir = file->f_path.dentry;
- parent = dir->d_parent->d_inode;
-
- ret = spufs_rmgang(parent, dir);
- WARN_ON(ret);
-
- return dcache_dir_close(inode, file);
-}
-
-const struct file_operations spufs_gang_fops = {
- .open = dcache_dir_open,
- .release = spufs_gang_close,
- .llseek = dcache_dir_lseek,
- .read = generic_read_dir,
- .readdir = dcache_readdir,
- .fsync = simple_sync_file,
-};
-
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
@@ -407,7 +390,6 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
- dget(dentry);
dir->i_nlink++;
dentry->d_inode->i_nlink++;
return ret;
@@ -437,7 +419,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
goto out;
}
- filp->f_op = &spufs_gang_fops;
+ filp->f_op = &simple_dir_operations;
fd_install(ret, filp);
out:
return ret;
@@ -458,8 +440,10 @@ static int spufs_create_gang(struct inode *inode,
* in error path of *_open().
*/
ret = spufs_gang_open(dget(dentry), mntget(mnt));
- if (ret < 0)
- WARN_ON(spufs_rmgang(inode, dentry));
+ if (ret < 0) {
+ int err = simple_rmdir(inode, dentry);
+ WARN_ON(err);
+ }
out:
mutex_unlock(&inode->i_mutex);
@@ -600,6 +584,10 @@ spufs_create_root(struct super_block *sb, void *data)
struct inode *inode;
int ret;
+ ret = -ENODEV;
+ if (!spu_management_ops)
+ goto out;
+
ret = -ENOMEM;
inode = spufs_new_inode(sb, S_IFDIR | 0775);
if (!inode)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index b6ecb30e7d58..3b831e07f1ed 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -93,43 +93,6 @@ void spu_stop_tick(struct spu_context *ctx)
}
}
-void spu_sched_tick(struct work_struct *work)
-{
- struct spu_context *ctx =
- container_of(work, struct spu_context, sched_work.work);
- struct spu *spu;
- int preempted = 0;
-
- /*
- * If this context is being stopped avoid rescheduling from the
- * scheduler tick because we would block on the state_mutex.
- * The caller will yield the spu later on anyway.
- */
- if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
- return;
-
- mutex_lock(&ctx->state_mutex);
- spu = ctx->spu;
- if (spu) {
- int best = sched_find_first_bit(spu_prio->bitmap);
- if (best <= ctx->prio) {
- spu_deactivate(ctx);
- preempted = 1;
- }
- }
- mutex_unlock(&ctx->state_mutex);
-
- if (preempted) {
- /*
- * We need to break out of the wait loop in spu_run manually
- * to ensure this context gets put on the runqueue again
- * ASAP.
- */
- wake_up(&ctx->stop_wq);
- } else
- spu_start_tick(ctx);
-}
-
/**
* spu_add_to_active_list - add spu to active list
* @spu: spu to add to the active list
@@ -273,34 +236,6 @@ static void spu_prio_wait(struct spu_context *ctx)
remove_wait_queue(&ctx->stop_wq, &wait);
}
-/**
- * spu_reschedule - try to find a runnable context for a spu
- * @spu: spu available
- *
- * This function is called whenever a spu becomes idle. It looks for the
- * most suitable runnable spu context and schedules it for execution.
- */
-static void spu_reschedule(struct spu *spu)
-{
- int best;
-
- spu_free(spu);
-
- spin_lock(&spu_prio->runq_lock);
- best = sched_find_first_bit(spu_prio->bitmap);
- if (best < MAX_PRIO) {
- struct list_head *rq = &spu_prio->runq[best];
- struct spu_context *ctx;
-
- BUG_ON(list_empty(rq));
-
- ctx = list_entry(rq->next, struct spu_context, rq);
- __spu_del_from_rq(ctx);
- wake_up(&ctx->stop_wq);
- }
- spin_unlock(&spu_prio->runq_lock);
-}
-
static struct spu *spu_get_idle(struct spu_context *ctx)
{
struct spu *spu = NULL;
@@ -429,6 +364,51 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
}
/**
+ * grab_runnable_context - try to find a runnable context
+ *
+ * Remove the highest priority context on the runqueue and return it
+ * to the caller. Returns %NULL if no runnable context was found.
+ */
+static struct spu_context *grab_runnable_context(int prio)
+{
+ struct spu_context *ctx = NULL;
+ int best;
+
+ spin_lock(&spu_prio->runq_lock);
+ best = sched_find_first_bit(spu_prio->bitmap);
+ if (best < prio) {
+ struct list_head *rq = &spu_prio->runq[best];
+
+ BUG_ON(list_empty(rq));
+
+ ctx = list_entry(rq->next, struct spu_context, rq);
+ __spu_del_from_rq(ctx);
+ }
+ spin_unlock(&spu_prio->runq_lock);
+
+ return ctx;
+}
+
+static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+{
+ struct spu *spu = ctx->spu;
+ struct spu_context *new = NULL;
+
+ if (spu) {
+ new = grab_runnable_context(max_prio);
+ if (new || force) {
+ spu_unbind_context(spu, ctx);
+ spu_free(spu);
+ if (new)
+ wake_up(&new->stop_wq);
+ }
+
+ }
+
+ return new != NULL;
+}
+
+/**
* spu_deactivate - unbind a context from it's physical spu
* @ctx: spu context to unbind
*
@@ -437,12 +417,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
*/
void spu_deactivate(struct spu_context *ctx)
{
- struct spu *spu = ctx->spu;
-
- if (spu) {
- spu_unbind_context(spu, ctx);
- spu_reschedule(spu);
- }
+ __spu_deactivate(ctx, 1, MAX_PRIO);
}
/**
@@ -455,21 +430,43 @@ void spu_deactivate(struct spu_context *ctx)
*/
void spu_yield(struct spu_context *ctx)
{
- struct spu *spu;
-
- if (mutex_trylock(&ctx->state_mutex)) {
- if ((spu = ctx->spu) != NULL) {
- int best = sched_find_first_bit(spu_prio->bitmap);
- if (best < MAX_PRIO) {
- pr_debug("%s: yielding SPU %d NODE %d\n",
- __FUNCTION__, spu->number, spu->node);
- spu_deactivate(ctx);
- }
- }
+ if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
+ mutex_lock(&ctx->state_mutex);
+ __spu_deactivate(ctx, 0, MAX_PRIO);
mutex_unlock(&ctx->state_mutex);
}
}
+void spu_sched_tick(struct work_struct *work)
+{
+ struct spu_context *ctx =
+ container_of(work, struct spu_context, sched_work.work);
+ int preempted;
+
+ /*
+ * If this context is being stopped avoid rescheduling from the
+ * scheduler tick because we would block on the state_mutex.
+ * The caller will yield the spu later on anyway.
+ */
+ if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+ return;
+
+ mutex_lock(&ctx->state_mutex);
+ preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
+ mutex_unlock(&ctx->state_mutex);
+
+ if (preempted) {
+ /*
+ * We need to break out of the wait loop in spu_run manually
+ * to ensure this context gets put on the runqueue again
+ * ASAP.
+ */
+ wake_up(&ctx->stop_wq);
+ } else {
+ spu_start_tick(ctx);
+ }
+}
+
int __init spu_sched_init(void)
{
int i;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0a947fd7de57..47617e8014a5 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -55,7 +55,7 @@ struct spu_context {
struct address_space *signal2; /* 'signal2' area mappings. */
struct address_space *mss; /* 'mss' area mappings. */
struct address_space *psmap; /* 'psmap' area mappings. */
- spinlock_t mapping_lock;
+ struct mutex mapping_lock;
u64 object_id; /* user space pointer for oprofile */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
diff --git a/arch/powerpc/platforms/celleb/Makefile b/arch/powerpc/platforms/celleb/Makefile
index f4f82520dc4f..5240046d8671 100644
--- a/arch/powerpc/platforms/celleb/Makefile
+++ b/arch/powerpc/platforms/celleb/Makefile
@@ -4,5 +4,5 @@ obj-y += interrupt.o iommu.o setup.o \
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PPC_UDBG_BEAT) += udbg_beat.o
-obj-$(CONFIG_HAS_TXX9_SERIAL) += scc_sio.o
+obj-$(CONFIG_SERIAL_TXX9) += scc_sio.o
obj-$(CONFIG_SPU_BASE) += spu_priv1.o
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 03cd45d8fefa..3c962d5757be 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -26,6 +26,7 @@
#include <asm/machdep.h>
#include <asm/reg.h>
+#include <asm/smp.h>
#include "pasemi.h"
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 95fa6a7d15ee..f33b21b9f5d4 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -31,8 +31,6 @@
#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1)
-#define IOBMAP_PAGE_FACTOR (PAGE_SHIFT - IOBMAP_PAGE_SHIFT)
-
#define IOB_BASE 0xe0000000
#define IOB_SIZE 0x3000
/* Configuration registers */
@@ -97,9 +95,6 @@ static void iobmap_build(struct iommu_table *tbl, long index,
bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
- npages <<= IOBMAP_PAGE_FACTOR;
- index <<= IOBMAP_PAGE_FACTOR;
-
ip = ((u32 *)tbl->it_base) + index;
while (npages--) {
@@ -125,9 +120,6 @@ static void iobmap_free(struct iommu_table *tbl, long index,
bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
- npages <<= IOBMAP_PAGE_FACTOR;
- index <<= IOBMAP_PAGE_FACTOR;
-
ip = ((u32 *)tbl->it_base) + index;
while (npages--) {
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 9da82c266ba9..ec9030dbb5f1 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -25,6 +25,7 @@
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
+#include <asm/smp.h>
#include "platform.h"
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index b854e7f1001c..f1df942072bb 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -752,6 +752,7 @@ skip_gserver_check:
void xics_request_IPIs(void)
{
unsigned int ipi;
+ int rc;
ipi = irq_create_mapping(xics_host, XICS_IPI);
BUG_ON(ipi == NO_IRQ);
@@ -762,11 +763,12 @@ void xics_request_IPIs(void)
*/
set_irq_handler(ipi, handle_percpu_irq);
if (firmware_has_feature(FW_FEATURE_LPAR))
- request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
- "IPI", NULL);
+ rc = request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
+ "IPI", NULL);
else
- request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
- "IPI", NULL);
+ rc = request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
+ "IPI", NULL);
+ BUG_ON(rc);
}
#endif /* CONFIG_SMP */
diff --git a/arch/ppc/syslib/ibm_ocp.c b/arch/ppc/syslib/ibm_ocp.c
index 3f6e55c79181..2ee176610e7c 100644
--- a/arch/ppc/syslib/ibm_ocp.c
+++ b/arch/ppc/syslib/ibm_ocp.c
@@ -1,4 +1,5 @@
#include <linux/module.h>
+#include <asm/ibm4xx.h>
#include <asm/ocp.h>
struct ocp_sys_info_data ocp_sys_info = {
diff --git a/arch/sh/boards/se/73180/setup.c b/arch/sh/boards/se/73180/setup.c
index 911ce1cdbd7f..e143017c8975 100644
--- a/arch/sh/boards/se/73180/setup.c
+++ b/arch/sh/boards/se/73180/setup.c
@@ -38,8 +38,8 @@ static struct platform_device *se73180_devices[] __initdata = {
static int __init se73180_devices_setup(void)
{
- return platform_add_devices(sh7343se_platform_devices,
- ARRAY_SIZE(sh7343se_platform_devices));
+ return platform_add_devices(se73180_devices,
+ ARRAY_SIZE(se73180_devices));
}
__initcall(se73180_devices_setup);
diff --git a/arch/sh/boards/superh/microdev/irq.c b/arch/sh/boards/superh/microdev/irq.c
index cc1cb04fa618..4d335077a3ff 100644
--- a/arch/sh/boards/superh/microdev/irq.c
+++ b/arch/sh/boards/superh/microdev/irq.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/microdev.h>
diff --git a/arch/sh/cchips/voyagergx/irq.c b/arch/sh/cchips/voyagergx/irq.c
index 70f12907647f..d70e5c8461b5 100644
--- a/arch/sh/cchips/voyagergx/irq.c
+++ b/arch/sh/cchips/voyagergx/irq.c
@@ -28,7 +28,7 @@ static void disable_voyagergx_irq(unsigned int irq)
unsigned long val;
unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE);
- pr_debug("disable_voyagergx_irq(%d): mask=%x\n", irq, mask);
+ pr_debug("disable_voyagergx_irq(%d): mask=%lx\n", irq, mask);
val = readl((void __iomem *)VOYAGER_INT_MASK);
val &= ~mask;
writel(val, (void __iomem *)VOYAGER_INT_MASK);
@@ -39,7 +39,7 @@ static void enable_voyagergx_irq(unsigned int irq)
unsigned long val;
unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE);
- pr_debug("disable_voyagergx_irq(%d): mask=%x\n", irq, mask);
+ pr_debug("disable_voyagergx_irq(%d): mask=%lx\n", irq, mask);
val = readl((void __iomem *)VOYAGER_INT_MASK);
val |= mask;
writel(val, (void __iomem *)VOYAGER_INT_MASK);
@@ -125,11 +125,12 @@ int voyagergx_irq_demux(int irq)
i = 17;
else
printk("Unexpected IRQ irq = %d status = 0x%08lx\n", irq, val);
- pr_debug("voyagergx_irq_demux %d \n", i);
- if (i < VOYAGER_IRQ_NUM) {
+ pr_debug("voyagergx_irq_demux %ld \n", i);
+ if (i < VOYAGER_IRQ_NUM) {
irq = VOYAGER_IRQ_BASE + i;
- if (voyagergx_demux[i].func != 0)
- irq = voyagergx_demux[i].func(irq, voyagergx_demux[i].dev);
+ if (voyagergx_demux[i].func != 0)
+ irq = voyagergx_demux[i].func(irq,
+ voyagergx_demux[i].dev);
}
}
return irq;
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 659cc081e5e7..b0b59d4a33ca 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -320,7 +320,9 @@ skip_restore:
.align 2
5: .long 0x00001000 ! DSP
+#ifdef CONFIG_KGDB_NMI
6: .long in_nmi
+#endif
7: .long 0x30000000
! common exception handler
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index fcb2c41bc34e..a33429463e96 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -111,7 +111,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
return 0;
}
-static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
+static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
{
unsigned long frqcr3;
unsigned int tmp;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index be03d74e99cb..0c7b7e33abdc 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -22,6 +22,7 @@
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#include <asm/mmu.h>
/*
* Remap an arbitrary physical address space into the kernel virtual
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index bd00f89eed1e..89a1b469b93d 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -396,6 +396,15 @@ config SCHED_SMT
when dealing with UltraSPARC cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on SMP
+ default y
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
source "kernel/Kconfig.preempt"
config CMDLINE_BOOL
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index d8d19093d12f..f964bf28d21a 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
+#
# Makefile for the linux kernel.
#
@@ -8,7 +8,7 @@ EXTRA_CFLAGS := -Werror
extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o setup.o cpu.o idprom.o \
- traps.o auxio.o una_asm.o \
+ traps.o auxio.o una_asm.o sysfs.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index ed712e0b3372..7d1a11822a1e 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -2514,9 +2514,9 @@ sun4v_ncs_request:
nop
.size sun4v_ncs_request, .-sun4v_ncs_request
- .globl sun4v_scv_send
- .type sun4v_scv_send,#function
-sun4v_scv_send:
+ .globl sun4v_svc_send
+ .type sun4v_svc_send,#function
+sun4v_svc_send:
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
@@ -2526,11 +2526,11 @@ sun4v_scv_send:
stx %o1, [%i3]
ret
restore
- .size sun4v_scv_send, .-sun4v_scv_send
+ .size sun4v_svc_send, .-sun4v_svc_send
- .globl sun4v_scv_recv
- .type sun4v_scv_recv,#function
-sun4v_scv_recv:
+ .globl sun4v_svc_recv
+ .type sun4v_svc_recv,#function
+sun4v_svc_recv:
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
@@ -2540,33 +2540,55 @@ sun4v_scv_recv:
stx %o1, [%i3]
ret
restore
- .size sun4v_scv_recv, .-sun4v_scv_recv
+ .size sun4v_svc_recv, .-sun4v_svc_recv
- .globl sun4v_scv_getstatus
- .type sun4v_scv_getstatus,#function
-sun4v_scv_getstatus:
+ .globl sun4v_svc_getstatus
+ .type sun4v_svc_getstatus,#function
+sun4v_svc_getstatus:
mov HV_FAST_SVC_GETSTATUS, %o5
mov %o1, %o4
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
- .size sun4v_scv_getstatus, .-sun4v_scv_getstatus
+ .size sun4v_svc_getstatus, .-sun4v_svc_getstatus
- .globl sun4v_scv_setstatus
- .type sun4v_scv_setstatus,#function
-sun4v_scv_setstatus:
+ .globl sun4v_svc_setstatus
+ .type sun4v_svc_setstatus,#function
+sun4v_svc_setstatus:
mov HV_FAST_SVC_SETSTATUS, %o5
ta HV_FAST_TRAP
retl
nop
- .size sun4v_scv_setstatus, .-sun4v_scv_setstatus
+ .size sun4v_svc_setstatus, .-sun4v_svc_setstatus
- .globl sun4v_scv_clrstatus
- .type sun4v_scv_clrstatus,#function
-sun4v_scv_clrstatus:
+ .globl sun4v_svc_clrstatus
+ .type sun4v_svc_clrstatus,#function
+sun4v_svc_clrstatus:
mov HV_FAST_SVC_CLRSTATUS, %o5
ta HV_FAST_TRAP
retl
nop
- .size sun4v_scv_clrstatus, .-sun4v_scv_clrstatus
+ .size sun4v_svc_clrstatus, .-sun4v_svc_clrstatus
+
+ .globl sun4v_mmustat_conf
+ .type sun4v_mmustat_conf,#function
+sun4v_mmustat_conf:
+ mov %o1, %o4
+ mov HV_FAST_MMUSTAT_CONF, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ .size sun4v_mmustat_conf, .-sun4v_mmustat_conf
+
+ .globl sun4v_mmustat_info
+ .type sun4v_mmustat_info,#function
+sun4v_mmustat_info:
+ mov %o0, %o4
+ mov HV_FAST_MMUSTAT_INFO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ .size sun4v_mmustat_info, .-sun4v_mmustat_info
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 9246c2cf9574..f0e16045fb16 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
}
}
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+ int i;
+
+ for (i = 0; i < mp->num_arcs; i++) {
+ struct mdesc_node *t = mp->arcs[i].arc;
+ const u64 *id;
+
+ if (strcmp(mp->arcs[i].name, "back"))
+ continue;
+
+ if (strcmp(t->name, "cpu"))
+ continue;
+
+ id = md_get_property(t, "id", NULL);
+ if (*id < NR_CPUS)
+ cpu_data(*id).proc_id = proc_id;
+ }
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+ struct mdesc_node *mp;
+ int idx;
+
+ idx = 0;
+ md_for_each_node_by_name(mp, exec_unit_name) {
+ const char *type;
+ int len;
+
+ type = md_get_property(mp, "type", &len);
+ if (!find_in_proplist(type, "int", len) &&
+ !find_in_proplist(type, "integer", len))
+ continue;
+
+ mark_proc_ids(mp, idx);
+
+ idx++;
+ }
+}
+
+static void __init set_proc_ids(void)
+{
+ __set_proc_ids("exec_unit");
+ __set_proc_ids("exec-unit");
+}
+
static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
{
u64 val;
@@ -574,9 +621,15 @@ static void __init mdesc_fill_in_cpu_data(void)
#endif
c->core_id = 0;
+ c->proc_id = -1;
}
+#ifdef CONFIG_SMP
+ sparc64_multi_core = 1;
+#endif
+
set_core_ids();
+ set_proc_ids();
smp_fill_in_sib_core_maps();
}
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index dad4b3ba705f..6f4a5284b0ea 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1781,6 +1781,10 @@ static void __init of_fill_in_cpu_data(void)
}
cpu_data(cpuid).core_id = portid + 1;
+ cpu_data(cpuid).proc_id = portid;
+#ifdef CONFIG_SMP
+ sparc64_multi_core = 1;
+#endif
} else {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "dcache-size", 16 * 1024);
@@ -1799,6 +1803,7 @@ static void __init of_fill_in_cpu_data(void)
of_getintprop_default(dp, "ecache-line-size", 64);
cpu_data(cpuid).core_id = 0;
+ cpu_data(cpuid).proc_id = -1;
}
#ifdef CONFIG_SMP
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index de9b4c13f1c7..7490cc670a53 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -513,22 +513,3 @@ void sun_do_break(void)
int serial_console = -1;
int stop_a_enabled = 1;
-
-static int __init topology_init(void)
-{
- int i, err;
-
- err = -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (p) {
- register_cpu(p, i);
- err = 0;
- }
- }
-
- return err;
-}
-
-subsys_initcall(topology_init);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c550bba3490a..4dcd7d0b60f2 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -44,6 +44,8 @@
extern void calibrate_delay(void);
+int sparc64_multi_core __read_mostly;
+
/* Please don't make this stuff initdata!!! --DaveM */
unsigned char boot_cpu_id;
@@ -51,6 +53,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+ { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
@@ -1217,13 +1221,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
unsigned int j;
if (cpu_data(i).core_id == 0) {
- cpu_set(i, cpu_sibling_map[i]);
+ cpu_set(i, cpu_core_map[i]);
continue;
}
for_each_possible_cpu(j) {
if (cpu_data(i).core_id ==
cpu_data(j).core_id)
+ cpu_set(j, cpu_core_map[i]);
+ }
+ }
+
+ for_each_possible_cpu(i) {
+ unsigned int j;
+
+ if (cpu_data(i).proc_id == -1) {
+ cpu_set(i, cpu_sibling_map[i]);
+ continue;
+ }
+
+ for_each_possible_cpu(j) {
+ if (cpu_data(i).proc_id ==
+ cpu_data(j).proc_id)
cpu_set(j, cpu_sibling_map[i]);
}
}
diff --git a/arch/sparc64/kernel/sysfs.c b/arch/sparc64/kernel/sysfs.c
new file mode 100644
index 000000000000..cdb1477af89f
--- /dev/null
+++ b/arch/sparc64/kernel/sysfs.c
@@ -0,0 +1,297 @@
+/* sysfs.c: Toplogy sysfs support code for sparc64.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+
+#include <asm/hypervisor.h>
+#include <asm/spitfire.h>
+
+static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));
+
+#define SHOW_MMUSTAT_ULONG(NAME) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+ struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
+ return sprintf(buf, "%lu\n", p->NAME); \
+} \
+static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)
+
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
+SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);
+
+static struct attribute *mmu_stat_attrs[] = {
+ &attr_immu_tsb_hits_ctx0_8k_tte.attr,
+ &attr_immu_tsb_ticks_ctx0_8k_tte.attr,
+ &attr_immu_tsb_hits_ctx0_64k_tte.attr,
+ &attr_immu_tsb_ticks_ctx0_64k_tte.attr,
+ &attr_immu_tsb_hits_ctx0_4mb_tte.attr,
+ &attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
+ &attr_immu_tsb_hits_ctx0_256mb_tte.attr,
+ &attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
+ &attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
+ &attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
+ &attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
+ &attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
+ &attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
+ &attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
+ &attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
+ &attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
+ &attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
+ &attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
+ &attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
+ &attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
+ &attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
+ &attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
+ &attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
+ &attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
+ &attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
+ &attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
+ &attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
+ &attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
+ &attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
+ &attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
+ &attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
+ &attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
+ NULL,
+};
+
+static struct attribute_group mmu_stat_group = {
+ .attrs = mmu_stat_attrs,
+ .name = "mmu_stats",
+};
+
+/* XXX convert to rusty's on_one_cpu */
+static unsigned long run_on_cpu(unsigned long cpu,
+ unsigned long (*func)(unsigned long),
+ unsigned long arg)
+{
+ cpumask_t old_affinity = current->cpus_allowed;
+ unsigned long ret;
+
+ /* should return -EINVAL to userspace */
+ if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
+ return 0;
+
+ ret = func(arg);
+
+ set_cpus_allowed(current, old_affinity);
+
+ return ret;
+}
+
+static unsigned long read_mmustat_enable(unsigned long junk)
+{
+ unsigned long ra = 0;
+
+ sun4v_mmustat_info(&ra);
+
+ return ra != 0;
+}
+
+static unsigned long write_mmustat_enable(unsigned long val)
+{
+ unsigned long ra, orig_ra;
+
+ if (val)
+ ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
+ else
+ ra = 0UL;
+
+ return sun4v_mmustat_conf(ra, &orig_ra);
+}
+
+static ssize_t show_mmustat_enable(struct sys_device *s, char *buf)
+{
+ unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
+ return sprintf(buf, "%lx\n", val);
+}
+
+static ssize_t store_mmustat_enable(struct sys_device *s, const char *buf, size_t count)
+{
+ unsigned long val, err;
+	int ret = sscanf(buf, "%lu", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ err = run_on_cpu(s->id, write_mmustat_enable, val);
+ if (err)
+ return -EIO;
+
+ return count;
+}
+
+static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);
+
+static int mmu_stats_supported;
+
+static int register_mmu_stats(struct sys_device *s)
+{
+ if (!mmu_stats_supported)
+ return 0;
+ sysdev_create_file(s, &attr_mmustat_enable);
+ return sysfs_create_group(&s->kobj, &mmu_stat_group);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void unregister_mmu_stats(struct sys_device *s)
+{
+ if (!mmu_stats_supported)
+ return;
+ sysfs_remove_group(&s->kobj, &mmu_stat_group);
+ sysdev_remove_file(s, &attr_mmustat_enable);
+}
+#endif
+
+#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+ cpuinfo_sparc *c = &cpu_data(dev->id); \
+ return sprintf(buf, "%lu\n", c->MEMBER); \
+}
+
+#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
+static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+{ \
+ cpuinfo_sparc *c = &cpu_data(dev->id); \
+ return sprintf(buf, "%u\n", c->MEMBER); \
+}
+
+SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
+SHOW_CPUDATA_ULONG_NAME(udelay_val, udelay_val);
+SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
+SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
+SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
+SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
+SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
+SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
+
+static struct sysdev_attribute cpu_core_attrs[] = {
+ _SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL),
+ _SYSDEV_ATTR(udelay_val, 0444, show_udelay_val, NULL),
+ _SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
+ _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
+ _SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
+ _SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
+ _SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL),
+ _SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL),
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static void register_cpu_online(unsigned int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct sys_device *s = &c->sysdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
+ sysdev_create_file(s, &cpu_core_attrs[i]);
+
+ register_mmu_stats(s);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void unregister_cpu_online(unsigned int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct sys_device *s = &c->sysdev;
+ int i;
+
+ unregister_mmu_stats(s);
+ for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
+ sysdev_remove_file(s, &cpu_core_attrs[i]);
+}
+#endif
+
+static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)(long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ register_cpu_online(cpu);
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ unregister_cpu_online(cpu);
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
+ .notifier_call = sysfs_cpu_notify,
+};
+
+static void __init check_mmu_stats(void)
+{
+ unsigned long dummy1, err;
+
+ if (tlb_type != hypervisor)
+ return;
+
+ err = sun4v_mmustat_info(&dummy1);
+ if (!err)
+ mmu_stats_supported = 1;
+}
+
+static int __init topology_init(void)
+{
+ int cpu;
+
+ check_mmu_stats();
+
+ register_cpu_notifier(&sysfs_cpu_nb);
+
+ for_each_possible_cpu(cpu) {
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ register_cpu(c, cpu);
+ if (cpu_online(cpu))
+ register_cpu_online(cpu);
+ }
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
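
With this file in place, the per-CPU attributes should surface under the cpu sysdev directories of this kernel generation (assumed here to be /sys/devices/system/cpu/cpuN/, with the attribute group exported as an mmu_stats/ subdirectory). A minimal userspace sketch, under that path assumption, that enables MMU statistics on cpu0 and reads back one of the exported counters:

/* Userspace sketch: enable MMU stats on cpu0 and read one counter.
 * Paths are assumptions based on where cpu sysdev attributes appear.
 */
#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned long hits;

	/* Writing a nonzero value arms statistics collection on cpu0. */
	f = fopen("/sys/devices/system/cpu/cpu0/mmustat_enable", "w");
	if (!f) {
		perror("mmustat_enable");
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);

	/* Read back one counter from the mmu_stats group. */
	f = fopen("/sys/devices/system/cpu/cpu0/mmu_stats/dmmu_tsb_hits_ctx0_8k_tte", "r");
	if (!f) {
		perror("dmmu_tsb_hits_ctx0_8k_tte");
		return 1;
	}
	if (fscanf(f, "%lu", &hits) == 1)
		printf("dmmu ctx0 8k TSB hits: %lu\n", hits);
	fclose(f);

	return 0;
}
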