Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile         |    9
-rw-r--r--  arch/mips/kernel/apm.c            |  604
-rw-r--r--  arch/mips/kernel/asm-offsets.c    |   10
-rw-r--r--  arch/mips/kernel/cpu-probe.c      |    5
-rw-r--r--  arch/mips/kernel/early_printk.c   |   40
-rw-r--r--  arch/mips/kernel/entry.S          |   25
-rw-r--r--  arch/mips/kernel/gdb-stub.c       |    6
-rw-r--r--  arch/mips/kernel/genex.S          |   47
-rw-r--r--  arch/mips/kernel/head.S           |   25
-rw-r--r--  arch/mips/kernel/i8259.c          |   41
-rw-r--r--  arch/mips/kernel/irixelf.c        |  331
-rw-r--r--  arch/mips/kernel/irq-msc01.c      |    4
-rw-r--r--  arch/mips/kernel/irq-mv6434x.c    |   14
-rw-r--r--  arch/mips/kernel/irq-rm7000.c     |   13
-rw-r--r--  arch/mips/kernel/irq-rm9000.c     |   24
-rw-r--r--  arch/mips/kernel/irq_cpu.c        |   21
-rw-r--r--  arch/mips/kernel/kspd.c           |   27
-rw-r--r--  arch/mips/kernel/linux32.c        |  240
-rw-r--r--  arch/mips/kernel/machine_kexec.c  |    4
-rw-r--r--  arch/mips/kernel/mips-mt.c        |   28
-rw-r--r--  arch/mips/kernel/mips_ksyms.c     |    1
-rw-r--r--  arch/mips/kernel/proc.c           |    8
-rw-r--r--  arch/mips/kernel/process.c        |   45
-rw-r--r--  arch/mips/kernel/ptrace.c         |   10
-rw-r--r--  arch/mips/kernel/r4k_fpu.S        |    3
-rw-r--r--  arch/mips/kernel/rtlx.c           |  282
-rw-r--r--  arch/mips/kernel/scall32-o32.S    |    2
-rw-r--r--  arch/mips/kernel/scall64-64.S     |    3
-rw-r--r--  arch/mips/kernel/scall64-n32.S    |   15
-rw-r--r--  arch/mips/kernel/scall64-o32.S    |   12
-rw-r--r--  arch/mips/kernel/setup.c          |   61
-rw-r--r--  arch/mips/kernel/signal-common.h  |  182
-rw-r--r--  arch/mips/kernel/signal.c         |  354
-rw-r--r--  arch/mips/kernel/signal32.c       |  563
-rw-r--r--  arch/mips/kernel/signal_n32.c     |   61
-rw-r--r--  arch/mips/kernel/smp-mt.c         |    9
-rw-r--r--  arch/mips/kernel/smp.c            |   25
-rw-r--r--  arch/mips/kernel/smtc.c           |  134
-rw-r--r--  arch/mips/kernel/sysirix.c        |    4
-rw-r--r--  arch/mips/kernel/time.c           |    7
-rw-r--r--  arch/mips/kernel/traps.c          |   88
-rw-r--r--  arch/mips/kernel/unaligned.c      |    2
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S    |    2
-rw-r--r--  arch/mips/kernel/vpe.c            |   67
44 files changed, 1368 insertions, 2090 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index bbbb8d7cb89b..49246264cc7c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -14,8 +14,6 @@ binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
-obj-$(CONFIG_APM) += apm.o
-
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
@@ -55,9 +53,9 @@ obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_BINFMT_IRIX) += binfmt_irix.o
-obj-$(CONFIG_MIPS32_COMPAT) += linux32.o signal32.o
+obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
-obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o ptrace32.o
+obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
obj-$(CONFIG_KGDB) += gdb-low.o gdb-stub.o
obj-$(CONFIG_PROC_FS) += proc.o
@@ -67,7 +65,6 @@ obj-$(CONFIG_64BIT) += cpu-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
-
-EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/apm.c b/arch/mips/kernel/apm.c
deleted file mode 100644
index ba16d07588cb..000000000000
--- a/arch/mips/kernel/apm.c
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * bios-less APM driver for MIPS Linux
- * Jamey Hicks <jamey@crl.dec.com>
- * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
- *
- * APM 1.2 Reference:
- * Intel Corporation, Microsoft Corporation. Advanced Power Management
- * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
- *
- * [This document is available from Microsoft at:
- * http://www.microsoft.com/hwdev/busbios/amp_12.htm]
- */
-#include <linux/module.h>
-#include <linux/poll.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/miscdevice.h>
-#include <linux/apm_bios.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/pm.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/completion.h>
-
-#include <asm/apm.h> /* apm_power_info */
-#include <asm/system.h>
-
-/*
- * The apm_bios device is one of the misc char devices.
- * This is its minor number.
- */
-#define APM_MINOR_DEV 134
-
-/*
- * See Documentation/Config.help for the configuration options.
- *
- * Various options can be changed at boot time as follows:
- * (We allow underscores for compatibility with the modules code)
- * apm=on/off enable/disable APM
- */
-
-/*
- * Maximum number of events stored
- */
-#define APM_MAX_EVENTS 16
-
-struct apm_queue {
- unsigned int event_head;
- unsigned int event_tail;
- apm_event_t events[APM_MAX_EVENTS];
-};
-
-/*
- * The per-file APM data
- */
-struct apm_user {
- struct list_head list;
-
- unsigned int suser: 1;
- unsigned int writer: 1;
- unsigned int reader: 1;
-
- int suspend_result;
- unsigned int suspend_state;
-#define SUSPEND_NONE 0 /* no suspend pending */
-#define SUSPEND_PENDING 1 /* suspend pending read */
-#define SUSPEND_READ 2 /* suspend read, pending ack */
-#define SUSPEND_ACKED 3 /* suspend acked */
-#define SUSPEND_DONE 4 /* suspend completed */
-
- struct apm_queue queue;
-};
-
-/*
- * Local variables
- */
-static int suspends_pending;
-static int apm_disabled;
-static int mips_apm_active;
-
-static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
-static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
-
-/*
- * This is a list of everyone who has opened /dev/apm_bios
- */
-static DECLARE_RWSEM(user_list_lock);
-static LIST_HEAD(apm_user_list);
-
-/*
- * kapmd info. kapmd provides us a process context to handle
- * "APM" events within - specifically necessary if we're going
- * to be suspending the system.
- */
-static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
-static DECLARE_COMPLETION(kapmd_exit);
-static DEFINE_SPINLOCK(kapmd_queue_lock);
-static struct apm_queue kapmd_queue;
-
-
-static const char driver_version[] = "1.13"; /* no spaces */
-
-
-
-/*
- * Compatibility cruft until the IPAQ people move over to the new
- * interface.
- */
-static void __apm_get_power_status(struct apm_power_info *info)
-{
-}
-
-/*
- * This allows machines to provide their own "apm get power status" function.
- */
-void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
-EXPORT_SYMBOL(apm_get_power_status);
-
-
-/*
- * APM event queue management.
- */
-static inline int queue_empty(struct apm_queue *q)
-{
- return q->event_head == q->event_tail;
-}
-
-static inline apm_event_t queue_get_event(struct apm_queue *q)
-{
- q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
- return q->events[q->event_tail];
-}
-
-static void queue_add_event(struct apm_queue *q, apm_event_t event)
-{
- q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
- if (q->event_head == q->event_tail) {
- static int notified;
-
- if (notified++ == 0)
- printk(KERN_ERR "apm: an event queue overflowed\n");
- q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
- }
- q->events[q->event_head] = event;
-}
-
-static void queue_event_one_user(struct apm_user *as, apm_event_t event)
-{
- if (as->suser && as->writer) {
- switch (event) {
- case APM_SYS_SUSPEND:
- case APM_USER_SUSPEND:
- /*
- * If this user already has a suspend pending,
- * don't queue another one.
- */
- if (as->suspend_state != SUSPEND_NONE)
- return;
-
- as->suspend_state = SUSPEND_PENDING;
- suspends_pending++;
- break;
- }
- }
- queue_add_event(&as->queue, event);
-}
-
-static void queue_event(apm_event_t event, struct apm_user *sender)
-{
- struct apm_user *as;
-
- down_read(&user_list_lock);
- list_for_each_entry(as, &apm_user_list, list) {
- if (as != sender && as->reader)
- queue_event_one_user(as, event);
- }
- up_read(&user_list_lock);
- wake_up_interruptible(&apm_waitqueue);
-}
-
-static void apm_suspend(void)
-{
- struct apm_user *as;
- int err = pm_suspend(PM_SUSPEND_MEM);
-
- /*
- * Anyone on the APM queues will think we're still suspended.
- * Send a message so everyone knows we're now awake again.
- */
- queue_event(APM_NORMAL_RESUME, NULL);
-
- /*
- * Finally, wake up anyone who is sleeping on the suspend.
- */
- down_read(&user_list_lock);
- list_for_each_entry(as, &apm_user_list, list) {
- as->suspend_result = err;
- as->suspend_state = SUSPEND_DONE;
- }
- up_read(&user_list_lock);
-
- wake_up(&apm_suspend_waitqueue);
-}
-
-static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
-{
- struct apm_user *as = fp->private_data;
- apm_event_t event;
- int i = count, ret = 0;
-
- if (count < sizeof(apm_event_t))
- return -EINVAL;
-
- if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
-
- while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
- event = queue_get_event(&as->queue);
-
- ret = -EFAULT;
- if (copy_to_user(buf, &event, sizeof(event)))
- break;
-
- if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
- as->suspend_state = SUSPEND_READ;
-
- buf += sizeof(event);
- i -= sizeof(event);
- }
-
- if (i < count)
- ret = count - i;
-
- return ret;
-}
-
-static unsigned int apm_poll(struct file *fp, poll_table * wait)
-{
- struct apm_user *as = fp->private_data;
-
- poll_wait(fp, &apm_waitqueue, wait);
- return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
-}
-
-/*
- * apm_ioctl - handle APM ioctl
- *
- * APM_IOC_SUSPEND
- * This IOCTL is overloaded, and performs two functions. It is used to:
- * - initiate a suspend
- * - acknowledge a suspend read from /dev/apm_bios.
- * Only when everyone who has opened /dev/apm_bios with write permission
- * has acknowledge does the actual suspend happen.
- */
-static int
-apm_ioctl(struct inode * inode, struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct apm_user *as = filp->private_data;
- unsigned long flags;
- int err = -EINVAL;
-
- if (!as->suser || !as->writer)
- return -EPERM;
-
- switch (cmd) {
- case APM_IOC_SUSPEND:
- as->suspend_result = -EINTR;
-
- if (as->suspend_state == SUSPEND_READ) {
- /*
- * If we read a suspend command from /dev/apm_bios,
- * then the corresponding APM_IOC_SUSPEND ioctl is
- * interpreted as an acknowledge.
- */
- as->suspend_state = SUSPEND_ACKED;
- suspends_pending--;
- } else {
- /*
- * Otherwise it is a request to suspend the system.
- * Queue an event for all readers, and expect an
- * acknowledge from all writers who haven't already
- * acknowledged.
- */
- queue_event(APM_USER_SUSPEND, as);
- }
-
- /*
- * If there are no further acknowledges required, suspend
- * the system.
- */
- if (suspends_pending == 0)
- apm_suspend();
-
- /*
- * Wait for the suspend/resume to complete. If there are
- * pending acknowledges, we wait here for them.
- *
- * Note that we need to ensure that the PM subsystem does
- * not kick us out of the wait when it suspends the threads.
- */
- flags = current->flags;
- current->flags |= PF_NOFREEZE;
-
- /*
- * Note: do not allow a thread which is acking the suspend
- * to escape until the resume is complete.
- */
- if (as->suspend_state == SUSPEND_ACKED)
- wait_event(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE);
- else
- wait_event_interruptible(apm_suspend_waitqueue,
- as->suspend_state == SUSPEND_DONE);
-
- current->flags = flags;
- err = as->suspend_result;
- as->suspend_state = SUSPEND_NONE;
- break;
- }
-
- return err;
-}
-
-static int apm_release(struct inode * inode, struct file * filp)
-{
- struct apm_user *as = filp->private_data;
- filp->private_data = NULL;
-
- down_write(&user_list_lock);
- list_del(&as->list);
- up_write(&user_list_lock);
-
- /*
- * We are now unhooked from the chain. As far as new
- * events are concerned, we no longer exist. However, we
- * need to balance suspends_pending, which means the
- * possibility of sleeping.
- */
- if (as->suspend_state != SUSPEND_NONE) {
- suspends_pending -= 1;
- if (suspends_pending == 0)
- apm_suspend();
- }
-
- kfree(as);
- return 0;
-}
-
-static int apm_open(struct inode * inode, struct file * filp)
-{
- struct apm_user *as;
-
- as = kzalloc(sizeof(*as), GFP_KERNEL);
- if (as) {
- /*
- * XXX - this is a tiny bit broken, when we consider BSD
- * process accounting. If the device is opened by root, we
- * instantly flag that we used superuser privs. Who knows,
- * we might close the device immediately without doing a
- * privileged operation -- cevans
- */
- as->suser = capable(CAP_SYS_ADMIN);
- as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
- as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
-
- down_write(&user_list_lock);
- list_add(&as->list, &apm_user_list);
- up_write(&user_list_lock);
-
- filp->private_data = as;
- }
-
- return as ? 0 : -ENOMEM;
-}
-
-static struct file_operations apm_bios_fops = {
- .owner = THIS_MODULE,
- .read = apm_read,
- .poll = apm_poll,
- .ioctl = apm_ioctl,
- .open = apm_open,
- .release = apm_release,
-};
-
-static struct miscdevice apm_device = {
- .minor = APM_MINOR_DEV,
- .name = "apm_bios",
- .fops = &apm_bios_fops
-};
-
-
-#ifdef CONFIG_PROC_FS
-/*
- * Arguments, with symbols from linux/apm_bios.h.
- *
- * 0) Linux driver version (this will change if format changes)
- * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
- * 2) APM flags from APM Installation Check (0x00):
- * bit 0: APM_16_BIT_SUPPORT
- * bit 1: APM_32_BIT_SUPPORT
- * bit 2: APM_IDLE_SLOWS_CLOCK
- * bit 3: APM_BIOS_DISABLED
- * bit 4: APM_BIOS_DISENGAGED
- * 3) AC line status
- * 0x00: Off-line
- * 0x01: On-line
- * 0x02: On backup power (BIOS >= 1.1 only)
- * 0xff: Unknown
- * 4) Battery status
- * 0x00: High
- * 0x01: Low
- * 0x02: Critical
- * 0x03: Charging
- * 0x04: Selected battery not present (BIOS >= 1.2 only)
- * 0xff: Unknown
- * 5) Battery flag
- * bit 0: High
- * bit 1: Low
- * bit 2: Critical
- * bit 3: Charging
- * bit 7: No system battery
- * 0xff: Unknown
- * 6) Remaining battery life (percentage of charge):
- * 0-100: valid
- * -1: Unknown
- * 7) Remaining battery life (time units):
- * Number of remaining minutes or seconds
- * -1: Unknown
- * 8) min = minutes; sec = seconds
- */
-static int apm_get_info(char *buf, char **start, off_t fpos, int length)
-{
- struct apm_power_info info;
- char *units;
- int ret;
-
- info.ac_line_status = 0xff;
- info.battery_status = 0xff;
- info.battery_flag = 0xff;
- info.battery_life = -1;
- info.time = -1;
- info.units = -1;
-
- if (apm_get_power_status)
- apm_get_power_status(&info);
-
- switch (info.units) {
- default: units = "?"; break;
- case 0: units = "min"; break;
- case 1: units = "sec"; break;
- }
-
- ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
- driver_version, APM_32_BIT_SUPPORT,
- info.ac_line_status, info.battery_status,
- info.battery_flag, info.battery_life,
- info.time, units);
-
- return ret;
-}
-#endif
-
-static int kapmd(void *arg)
-{
- daemonize("kapmd");
- current->flags |= PF_NOFREEZE;
-
- do {
- apm_event_t event;
-
- wait_event_interruptible(kapmd_wait,
- !queue_empty(&kapmd_queue) || !mips_apm_active);
-
- if (!mips_apm_active)
- break;
-
- spin_lock_irq(&kapmd_queue_lock);
- event = 0;
- if (!queue_empty(&kapmd_queue))
- event = queue_get_event(&kapmd_queue);
- spin_unlock_irq(&kapmd_queue_lock);
-
- switch (event) {
- case 0:
- break;
-
- case APM_LOW_BATTERY:
- case APM_POWER_STATUS_CHANGE:
- queue_event(event, NULL);
- break;
-
- case APM_USER_SUSPEND:
- case APM_SYS_SUSPEND:
- queue_event(event, NULL);
- if (suspends_pending == 0)
- apm_suspend();
- break;
-
- case APM_CRITICAL_SUSPEND:
- apm_suspend();
- break;
- }
- } while (1);
-
- complete_and_exit(&kapmd_exit, 0);
-}
-
-static int __init apm_init(void)
-{
- int ret;
-
- if (apm_disabled) {
- printk(KERN_NOTICE "apm: disabled on user request.\n");
- return -ENODEV;
- }
-
- mips_apm_active = 1;
-
- ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
- if (ret < 0) {
- mips_apm_active = 0;
- return ret;
- }
-
-#ifdef CONFIG_PROC_FS
- create_proc_info_entry("apm", 0, NULL, apm_get_info);
-#endif
-
- ret = misc_register(&apm_device);
- if (ret != 0) {
- remove_proc_entry("apm", NULL);
-
- mips_apm_active = 0;
- wake_up(&kapmd_wait);
- wait_for_completion(&kapmd_exit);
- }
-
- return ret;
-}
-
-static void __exit apm_exit(void)
-{
- misc_deregister(&apm_device);
- remove_proc_entry("apm", NULL);
-
- mips_apm_active = 0;
- wake_up(&kapmd_wait);
- wait_for_completion(&kapmd_exit);
-}
-
-module_init(apm_init);
-module_exit(apm_exit);
-
-MODULE_AUTHOR("Stephen Rothwell");
-MODULE_DESCRIPTION("Advanced Power Management");
-MODULE_LICENSE("GPL");
-
-#ifndef MODULE
-static int __init apm_setup(char *str)
-{
- while ((str != NULL) && (*str != '\0')) {
- if (strncmp(str, "off", 3) == 0)
- apm_disabled = 1;
- if (strncmp(str, "on", 2) == 0)
- apm_disabled = 0;
- str = strchr(str, ',');
- if (str != NULL)
- str += strspn(str, ", \t");
- }
- return 1;
-}
-
-__setup("apm=", apm_setup);
-#endif
-
-/**
- * apm_queue_event - queue an APM event for kapmd
- * @event: APM event
- *
- * Queue an APM event for kapmd to process and ultimately take the
- * appropriate action. Only a subset of events are handled:
- * %APM_LOW_BATTERY
- * %APM_POWER_STATUS_CHANGE
- * %APM_USER_SUSPEND
- * %APM_SYS_SUSPEND
- * %APM_CRITICAL_SUSPEND
- */
-void apm_queue_event(apm_event_t event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&kapmd_queue_lock, flags);
- queue_add_event(&kapmd_queue, event);
- spin_unlock_irqrestore(&kapmd_queue_lock, flags);
-
- wake_up_interruptible(&kapmd_wait);
-}
-EXPORT_SYMBOL(apm_queue_event);
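
The deleted apm.c above delivers events through a small fixed-size ring of apm_event_t values (queue_add_event()/queue_get_event(), with locking left to the callers). A minimal stand-alone sketch of that ring logic, with apm_event_t reduced to an int and the kernel context stripped away:

/*
 * User-space sketch of the APM event ring used in the deleted driver.
 * Assumptions: apm_event_t is just an int here, APM_MAX_EVENTS stays 16,
 * and no locking is shown (the driver serialised callers externally).
 */
#include <stdio.h>

#define APM_MAX_EVENTS 16

struct apm_queue {
	unsigned int event_head;
	unsigned int event_tail;
	int events[APM_MAX_EVENTS];
};

static int queue_empty(struct apm_queue *q)
{
	return q->event_head == q->event_tail;
}

static int queue_get_event(struct apm_queue *q)
{
	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	return q->events[q->event_tail];
}

static void queue_add_event(struct apm_queue *q, int event)
{
	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
	if (q->event_head == q->event_tail) {
		/* overflow: drop the oldest event, as the driver did
		 * (minus its one-time printk warning) */
		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	}
	q->events[q->event_head] = event;
}

int main(void)
{
	struct apm_queue q = { 0, 0, { 0 } };

	queue_add_event(&q, 1);
	queue_add_event(&q, 2);
	while (!queue_empty(&q))
		printf("event %d\n", queue_get_event(&q));
	return 0;
}

Because head and tail start equal and add advances head before storing, the ring holds at most APM_MAX_EVENTS - 1 outstanding events before the overflow path starts discarding the oldest one.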
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ff88b06f89df..761a779d5c4f 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -64,6 +64,9 @@ void output_ptreg_defines(void)
offset("#define PT_R31 ", struct pt_regs, regs[31]);
offset("#define PT_LO ", struct pt_regs, lo);
offset("#define PT_HI ", struct pt_regs, hi);
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ offset("#define PT_ACX ", struct pt_regs, acx);
+#endif
offset("#define PT_EPC ", struct pt_regs, cp0_epc);
offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
offset("#define PT_STATUS ", struct pt_regs, cp0_status);
@@ -99,7 +102,6 @@ void output_thread_info_defines(void)
offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
offset("#define TI_REGS ", struct thread_info, regs);
- constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER);
constant("#define _THREAD_SIZE ", THREAD_SIZE);
constant("#define _THREAD_MASK ", THREAD_MASK);
linefeed;
@@ -234,10 +236,6 @@ void output_mm_defines(void)
constant("#define _PMD_SHIFT ", PMD_SHIFT);
constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
linefeed;
- constant("#define _PGD_ORDER ", PGD_ORDER);
- constant("#define _PMD_ORDER ", PMD_ORDER);
- constant("#define _PTE_ORDER ", PTE_ORDER);
- linefeed;
constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
@@ -250,10 +248,10 @@ void output_sc_defines(void)
text("/* Linux sigcontext offsets. */");
offset("#define SC_REGS ", struct sigcontext, sc_regs);
offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
+ offset("#define SC_ACX ", struct sigcontext, sc_acx);
offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
offset("#define SC_PC ", struct sigcontext, sc_pc);
- offset("#define SC_STATUS ", struct sigcontext, sc_status);
offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
offset("#define SC_HI1 ", struct sigcontext, sc_hi1);
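
asm-offsets.c never runs on the target; it exists only so the build can turn C structure layouts into #define lines (PT_ACX, SC_ACX and friends) that assembly sources include. The kernel derives these from compiler output that the build post-processes rather than by executing a program, but the underlying arithmetic is plain offsetof(); the sketch below is a simplified, hypothetical illustration, and fake_pt_regs is not the kernel's structure:

/*
 * Illustration only: the asm-offsets mechanism boils down to offsetof().
 * Both the structure and the offset() macro here are stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_pt_regs {
	unsigned long regs[32];
	unsigned long lo;
	unsigned long hi;
	unsigned long acx;		/* cf. the new PT_ACX offset above */
	unsigned long cp0_epc;
};

#define offset(string, type, member) \
	printf("#define %s%zu\n", string, offsetof(struct type, member))

int main(void)
{
	offset("PT_LO  ", fake_pt_regs, lo);
	offset("PT_HI  ", fake_pt_regs, hi);
	offset("PT_ACX ", fake_pt_regs, acx);
	offset("PT_EPC ", fake_pt_regs, cp0_epc);
	return 0;
}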
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 442839e9578c..ab755ea26c6a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -16,6 +16,7 @@
#include <linux/ptrace.h>
#include <linux/stddef.h>
+#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
@@ -97,7 +98,7 @@ static void au1k_wait(void)
static int __initdata nowait = 0;
-int __init wait_disable(char *s)
+static int __init wait_disable(char *s)
{
nowait = 1;
@@ -565,7 +566,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
if (config3 & MIPS_CONF3_VEIC)
c->options |= MIPS_CPU_VEIC;
if (config3 & MIPS_CONF3_MT)
- c->ases |= MIPS_ASE_MIPSMT;
+ c->ases |= MIPS_ASE_MIPSMT;
return config3 & MIPS_CONF_M;
}
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
new file mode 100644
index 000000000000..304efdc5682f
--- /dev/null
+++ b/arch/mips/kernel/early_printk.c
@@ -0,0 +1,40 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/console.h>
+#include <linux/init.h>
+
+extern void prom_putchar(char);
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+ while (n-- && *s) {
+ if (*s == '\n')
+ prom_putchar('\r');
+ prom_putchar(*s);
+ s++;
+ }
+}
+
+static struct console early_console = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
+void __init setup_early_printk(void)
+{
+ register_console(&early_console);
+}
+
+void __init disable_early_printk(void)
+{
+ unregister_console(&early_console);
+}
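
The new early_printk.c leaves prom_putchar() to the platform. As an illustration only, a hypothetical board-side implementation for a memory-mapped 8250-style UART; the base address, register offsets and status bit below are invented, not taken from any real board support code:

/*
 * Hypothetical platform-side prom_putchar() for the early console above.
 * EARLY_UART_BASE and the register layout are made up for the example;
 * a real board would substitute its documented UART.
 */
#include <asm/io.h>

#define EARLY_UART_BASE	0xbfd003f8UL	/* made-up KSEG1 address */
#define UART_TX		0x00		/* transmit holding register */
#define UART_LSR	0x05		/* line status register */
#define UART_LSR_THRE	0x20		/* transmitter ready for a byte */

void prom_putchar(char c)
{
	void __iomem *base = (void __iomem *)EARLY_UART_BASE;

	while (!(readb(base + UART_LSR) & UART_LSR_THRE))
		;		/* busy-wait; this runs long before interrupts */
	writeb(c, base + UART_TX);
}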
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index f10b6a19f8bf..686249c5c328 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -21,24 +21,21 @@
#endif
#ifndef CONFIG_PREEMPT
- .macro preempt_stop
- local_irq_disable
- .endm
#define resume_kernel restore_all
+#else
+#define __ret_from_irq ret_from_exception
#endif
.text
.align 5
-FEXPORT(ret_from_irq)
- LONG_S s0, TI_REGS($28)
-#ifdef CONFIG_PREEMPT
-FEXPORT(ret_from_exception)
-#else
- b _ret_from_irq
+#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
- preempt_stop
+ local_irq_disable # preempt stop
+ b __ret_from_irq
#endif
-FEXPORT(_ret_from_irq)
+FEXPORT(ret_from_irq)
+ LONG_S s0, TI_REGS($28)
+FEXPORT(__ret_from_irq)
LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
andi t0, t0, KU_USER
beqz t0, resume_kernel
@@ -124,7 +121,11 @@ FEXPORT(restore_partial) # restore partial frame
SAVE_AT
SAVE_TEMP
LONG_L v0, PT_STATUS(sp)
- and v0, 1
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and v0, ST0_IEP
+#else
+ and v0, ST0_IE
+#endif
beqz v0, 1f
jal trace_hardirqs_on
b 2f
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 719d26968cb2..7bc882049269 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -505,13 +505,13 @@ void show_gdbregs(struct gdb_regs * regs)
*/
printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->reg0, regs->reg1, regs->reg2, regs->reg3,
- regs->reg4, regs->reg5, regs->reg6, regs->reg7);
+ regs->reg4, regs->reg5, regs->reg6, regs->reg7);
printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->reg8, regs->reg9, regs->reg10, regs->reg11,
- regs->reg12, regs->reg13, regs->reg14, regs->reg15);
+ regs->reg12, regs->reg13, regs->reg14, regs->reg15);
printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->reg16, regs->reg17, regs->reg18, regs->reg19,
- regs->reg20, regs->reg21, regs->reg22, regs->reg23);
+ regs->reg20, regs->reg21, regs->reg22, regs->reg23);
printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->reg24, regs->reg25, regs->reg26, regs->reg27,
regs->reg28, regs->reg29, regs->reg30, regs->reg31);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index aacd4a005c5f..297bd56c2347 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -128,6 +128,37 @@ handle_vcei:
.align 5
NESTED(handle_int, PT_SIZE, sp)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * Check to see if the interrupted code has just disabled
+ * interrupts and ignore this interrupt for now if so.
+ *
+ * local_irq_disable() disables interrupts and then calls
+ * trace_hardirqs_off() to track the state. If an interrupt is taken
+ * after interrupts are disabled but before the state is updated
+ * it will appear to restore_all that it is incorrectly returning with
+ * interrupts disabled
+ */
+ .set push
+ .set noat
+ mfc0 k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ and k0, ST0_IEP
+ bnez k0, 1f
+
+ mfc0 k0, CP0_EPC
+ .set noreorder
+ j k0
+ rfe
+#else
+ and k0, ST0_IE
+ bnez k0, 1f
+
+ eret
+#endif
+1:
+ .set pop
+#endif
SAVE_ALL
CLI
TRACE_IRQS_OFF
@@ -181,13 +212,13 @@ NESTED(except_vec_vi, 0, sp)
* during service by SMTC kernel, we also want to
* pass the IM value to be cleared.
*/
-EXPORT(except_vec_vi_mori)
+FEXPORT(except_vec_vi_mori)
ori a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
-EXPORT(except_vec_vi_lui)
+FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
j except_vec_vi_handler
-EXPORT(except_vec_vi_ori)
+FEXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
END(except_vec_vi)
@@ -220,7 +251,17 @@ NESTED(except_vec_vi_handler, 0, sp)
_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
CLI
+#ifdef CONFIG_TRACE_IRQFLAGS
+ move s0, v0
+#ifdef CONFIG_MIPS_MT_SMTC
+ move s1, a0
+#endif
TRACE_IRQS_OFF
+#ifdef CONFIG_MIPS_MT_SMTC
+ move a0, s1
+#endif
+ move v0, s0
+#endif
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 9a7811d13db2..6f57ca44291f 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -231,28 +231,3 @@ NESTED(smp_bootstrap, 16, sp)
#endif /* CONFIG_SMP */
__FINIT
-
- .comm kernelsp, NR_CPUS * 8, 8
- .comm pgd_current, NR_CPUS * 8, 8
-
- .comm fw_arg0, SZREG, SZREG # firmware arguments
- .comm fw_arg1, SZREG, SZREG
- .comm fw_arg2, SZREG, SZREG
- .comm fw_arg3, SZREG, SZREG
-
- .macro page name, order
- .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
- .endm
-
- /*
- * On 64-bit we've got three-level pagetables with a slightly
- * different layout ...
- */
- page swapper_pg_dir, _PGD_ORDER
-#ifdef CONFIG_64BIT
-#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
- page module_pg_dir, _PGD_ORDER
-#endif
- page invalid_pmd_table, _PMD_ORDER
-#endif
- page invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b59a676c6d0e..2345160e63fc 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -28,7 +28,7 @@
* moves to arch independent land
*/
-static int i8259A_auto_eoi;
+static int i8259A_auto_eoi = -1;
DEFINE_SPINLOCK(i8259A_lock);
/* some platforms call this... */
void mask_and_ack_8259A(unsigned int);
@@ -54,9 +54,11 @@ static unsigned int cached_irq_mask = 0xffff;
void disable_8259A_irq(unsigned int irq)
{
- unsigned int mask = 1 << irq;
+ unsigned int mask;
unsigned long flags;
+ irq -= I8259A_IRQ_BASE;
+ mask = 1 << irq;
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
@@ -68,9 +70,11 @@ void disable_8259A_irq(unsigned int irq)
void enable_8259A_irq(unsigned int irq)
{
- unsigned int mask = ~(1 << irq);
+ unsigned int mask;
unsigned long flags;
+ irq -= I8259A_IRQ_BASE;
+ mask = ~(1 << irq);
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
@@ -82,10 +86,12 @@ void enable_8259A_irq(unsigned int irq)
int i8259A_irq_pending(unsigned int irq)
{
- unsigned int mask = 1 << irq;
+ unsigned int mask;
unsigned long flags;
int ret;
+ irq -= I8259A_IRQ_BASE;
+ mask = 1 << irq;
spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
@@ -134,9 +140,11 @@ static inline int i8259A_irq_real(unsigned int irq)
*/
void mask_and_ack_8259A(unsigned int irq)
{
- unsigned int irqmask = 1 << irq;
+ unsigned int irqmask;
unsigned long flags;
+ irq -= I8259A_IRQ_BASE;
+ irqmask = 1 << irq;
spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
@@ -169,8 +177,8 @@ handle_real_irq:
outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
}
#ifdef CONFIG_MIPS_MT_SMTC
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
+ if (irq_hwmask[irq] & ST0_IM)
+ set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif /* CONFIG_MIPS_MT_SMTC */
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
@@ -208,7 +216,8 @@ spurious_8259A_irq:
static int i8259A_resume(struct sys_device *dev)
{
- init_8259A(i8259A_auto_eoi);
+ if (i8259A_auto_eoi >= 0)
+ init_8259A(i8259A_auto_eoi);
return 0;
}
@@ -218,8 +227,10 @@ static int i8259A_shutdown(struct sys_device *dev)
* the kernel initialization code can get it
* out of.
*/
- outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
+ if (i8259A_auto_eoi >= 0) {
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
+ }
return 0;
}
@@ -244,7 +255,7 @@ static int __init i8259A_init_sysfs(void)
device_initcall(i8259A_init_sysfs);
-void __init init_8259A(int auto_eoi)
+void init_8259A(int auto_eoi)
{
unsigned long flags;
@@ -317,13 +328,13 @@ void __init init_i8259_irqs (void)
{
int i;
- request_resource(&ioport_resource, &pic1_io_resource);
- request_resource(&ioport_resource, &pic2_io_resource);
+ insert_resource(&ioport_resource, &pic1_io_resource);
+ insert_resource(&ioport_resource, &pic2_io_resource);
init_8259A(0);
- for (i = 0; i < 16; i++)
+ for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++)
set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
- setup_irq(PIC_CASCADE_IR, &irq2);
+ setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
}
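
The i8259.c changes above stop assuming the PIC's interrupts start at Linux IRQ 0: each handler first subtracts I8259A_IRQ_BASE to recover the local 0-15 line and only then forms the mask bit. The pattern, condensed into a small stand-alone sketch with the base fixed at 0 purely for the example:

/*
 * Sketch of the IRQ rebasing added to the i8259 handlers above.
 * I8259A_IRQ_BASE is a platform choice; 0 keeps the old numbering.
 */
#include <stdio.h>

#define I8259A_IRQ_BASE 0

static unsigned int i8259_mask_bit(unsigned int irq)
{
	irq -= I8259A_IRQ_BASE;		/* Linux IRQ number -> PIC line 0..15 */
	return 1U << irq;		/* bit to set/clear in cached_irq_mask */
}

int main(void)
{
	unsigned int irq = I8259A_IRQ_BASE + 3;

	printf("IRQ %u -> mask bit 0x%04x\n", irq, i8259_mask_bit(irq));
	return 0;
}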
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 37cad5de515c..3cc25c05d367 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -10,6 +10,8 @@
* Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
* Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
*/
+#undef DEBUG
+
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/stat.h>
@@ -40,8 +42,6 @@
#include <linux/elf.h>
-#undef DEBUG
-
static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_irix_library(struct file *);
static int irix_core_dump(long signr, struct pt_regs * regs,
@@ -52,72 +52,102 @@ static struct linux_binfmt irix_format = {
irix_core_dump, PAGE_SIZE
};
-#ifdef DEBUG
/* Debugging routines. */
static char *get_elf_p_type(Elf32_Word p_type)
{
- int i = (int) p_type;
-
- switch(i) {
- case PT_NULL: return("PT_NULL"); break;
- case PT_LOAD: return("PT_LOAD"); break;
- case PT_DYNAMIC: return("PT_DYNAMIC"); break;
- case PT_INTERP: return("PT_INTERP"); break;
- case PT_NOTE: return("PT_NOTE"); break;
- case PT_SHLIB: return("PT_SHLIB"); break;
- case PT_PHDR: return("PT_PHDR"); break;
- case PT_LOPROC: return("PT_LOPROC/REGINFO"); break;
- case PT_HIPROC: return("PT_HIPROC"); break;
- default: return("PT_BOGUS"); break;
+#ifdef DEBUG
+ switch (p_type) {
+ case PT_NULL:
+ return "PT_NULL";
+ break;
+
+ case PT_LOAD:
+ return "PT_LOAD";
+ break;
+
+ case PT_DYNAMIC:
+ return "PT_DYNAMIC";
+ break;
+
+ case PT_INTERP:
+ return "PT_INTERP";
+ break;
+
+ case PT_NOTE:
+ return "PT_NOTE";
+ break;
+
+ case PT_SHLIB:
+ return "PT_SHLIB";
+ break;
+
+ case PT_PHDR:
+ return "PT_PHDR";
+ break;
+
+ case PT_LOPROC:
+ return "PT_LOPROC/REGINFO";
+ break;
+
+ case PT_HIPROC:
+ return "PT_HIPROC";
+ break;
+
+ default:
+ return "PT_BOGUS";
+ break;
}
+#endif
}
static void print_elfhdr(struct elfhdr *ehp)
{
int i;
- printk("ELFHDR: e_ident<");
- for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]);
- printk("%x>\n", ehp->e_ident[i]);
- printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
- (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
- (unsigned long) ehp->e_version);
- printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
- "e_flags[%08lx]\n",
- (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
- (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
- printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
- (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize,
- (unsigned short) ehp->e_phnum);
- printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
- (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum,
- (unsigned short) ehp->e_shstrndx);
+ pr_debug("ELFHDR: e_ident<");
+ for (i = 0; i < (EI_NIDENT - 1); i++)
+ pr_debug("%x ", ehp->e_ident[i]);
+ pr_debug("%x>\n", ehp->e_ident[i]);
+ pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
+ (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
+ (unsigned long) ehp->e_version);
+ pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
+ "e_flags[%08lx]\n",
+ (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
+ (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
+ pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
+ (unsigned short) ehp->e_ehsize,
+ (unsigned short) ehp->e_phentsize,
+ (unsigned short) ehp->e_phnum);
+ pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
+ (unsigned short) ehp->e_shentsize,
+ (unsigned short) ehp->e_shnum,
+ (unsigned short) ehp->e_shstrndx);
}
static void print_phdr(int i, struct elf_phdr *ep)
{
- printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
- "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
- (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
- (unsigned long) ep->p_paddr);
- printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
- "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
- (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
- (unsigned long) ep->p_align);
+ pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
+ "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
+ (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
+ (unsigned long) ep->p_paddr);
+ pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
+ "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
+ (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
+ (unsigned long) ep->p_align);
}
static void dump_phdrs(struct elf_phdr *ep, int pnum)
{
int i;
- for(i = 0; i < pnum; i++, ep++) {
- if((ep->p_type == PT_LOAD) ||
- (ep->p_type == PT_INTERP) ||
- (ep->p_type == PT_PHDR))
+ for (i = 0; i < pnum; i++, ep++) {
+ if ((ep->p_type == PT_LOAD) ||
+ (ep->p_type == PT_INTERP) ||
+ (ep->p_type == PT_PHDR))
print_phdr(i, ep);
}
}
-#endif /* DEBUG */
static void set_brk(unsigned long start, unsigned long end)
{
@@ -156,11 +186,10 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
elf_addr_t *envp;
elf_addr_t *sp, *csp;
-#ifdef DEBUG
- printk("create_irix_tables: p[%p] argc[%d] envc[%d] "
- "load_addr[%08x] interp_load_addr[%08x]\n",
- p, argc, envc, load_addr, interp_load_addr);
-#endif
+ pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
+ "load_addr[%08x] interp_load_addr[%08x]\n",
+ p, argc, envc, load_addr, interp_load_addr);
+
sp = (elf_addr_t *) (~15UL & (unsigned long) p);
csp = sp;
csp -= exec ? DLINFO_ITEMS*2 : 2;
@@ -181,7 +210,7 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
sp -= 2;
NEW_AUX_ENT(0, AT_NULL, 0);
- if(exec) {
+ if (exec) {
sp -= 11*2;
NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
@@ -245,9 +274,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
last_bss = 0;
error = load_addr = 0;
-#ifdef DEBUG
print_elfhdr(interp_elf_ex);
-#endif
/* First of all, some simple consistency checks */
if ((interp_elf_ex->e_type != ET_EXEC &&
@@ -258,7 +285,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
}
/* Now read in all of the header information */
- if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
+ if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
printk("IRIX interp header bigger than a page (%d)\n",
(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
return 0xffffffff;
@@ -267,15 +294,15 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
GFP_KERNEL);
- if(!elf_phdata) {
- printk("Cannot kmalloc phdata for IRIX interp.\n");
- return 0xffffffff;
+ if (!elf_phdata) {
+ printk("Cannot kmalloc phdata for IRIX interp.\n");
+ return 0xffffffff;
}
/* If the size of this structure has changed, then punt, since
* we will be doing the wrong thing.
*/
- if(interp_elf_ex->e_phentsize != 32) {
+ if (interp_elf_ex->e_phentsize != 32) {
printk("IRIX interp e_phentsize == %d != 32 ",
interp_elf_ex->e_phentsize);
kfree(elf_phdata);
@@ -286,61 +313,71 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
(char *) elf_phdata,
sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
-#ifdef DEBUG
dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
-#endif
eppnt = elf_phdata;
- for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
- if(eppnt->p_type == PT_LOAD) {
- int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
- int elf_prot = 0;
- unsigned long vaddr = 0;
- if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
- if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
- if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
- elf_type |= MAP_FIXED;
- vaddr = eppnt->p_vaddr;
-
- pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
- interpreter, vaddr,
- (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
- (unsigned long) elf_prot, (unsigned long) elf_type,
- (unsigned long) (eppnt->p_offset & 0xfffff000));
- down_write(&current->mm->mmap_sem);
- error = do_mmap(interpreter, vaddr,
- eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
- elf_prot, elf_type,
- eppnt->p_offset & 0xfffff000);
- up_write(&current->mm->mmap_sem);
-
- if(error < 0 && error > -1024) {
- printk("Aieee IRIX interp mmap error=%d\n", error);
- break; /* Real error */
- }
- pr_debug("error=%08lx ", (unsigned long) error);
- if(!load_addr && interp_elf_ex->e_type == ET_DYN) {
- load_addr = error;
- pr_debug("load_addr = error ");
- }
-
- /* Find the end of the file mapping for this phdr, and keep
- * track of the largest address we see for this.
- */
- k = eppnt->p_vaddr + eppnt->p_filesz;
- if(k > elf_bss) elf_bss = k;
-
- /* Do the same thing for the memory mapping - between
- * elf_bss and last_bss is the bss section.
- */
- k = eppnt->p_memsz + eppnt->p_vaddr;
- if(k > last_bss) last_bss = k;
- pr_debug("\n");
- }
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+ int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
+ int elf_prot = 0;
+ unsigned long vaddr = 0;
+ if (eppnt->p_flags & PF_R)
+ elf_prot = PROT_READ;
+ if (eppnt->p_flags & PF_W)
+ elf_prot |= PROT_WRITE;
+ if (eppnt->p_flags & PF_X)
+ elf_prot |= PROT_EXEC;
+ elf_type |= MAP_FIXED;
+ vaddr = eppnt->p_vaddr;
+
+ pr_debug("INTERP do_mmap"
+ "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
+ interpreter, vaddr,
+ (unsigned long)
+ (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
+ (unsigned long)
+ elf_prot, (unsigned long) elf_type,
+ (unsigned long)
+ (eppnt->p_offset & 0xfffff000));
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(interpreter, vaddr,
+ eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
+ elf_prot, elf_type,
+ eppnt->p_offset & 0xfffff000);
+ up_write(&current->mm->mmap_sem);
+
+ if (error < 0 && error > -1024) {
+ printk("Aieee IRIX interp mmap error=%d\n",
+ error);
+ break; /* Real error */
+ }
+ pr_debug("error=%08lx ", (unsigned long) error);
+ if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
+ load_addr = error;
+ pr_debug("load_addr = error ");
+ }
+
+ /*
+ * Find the end of the file mapping for this phdr, and
+ * keep track of the largest address we see for this.
+ */
+ k = eppnt->p_vaddr + eppnt->p_filesz;
+ if (k > elf_bss)
+ elf_bss = k;
+
+ /* Do the same thing for the memory mapping - between
+ * elf_bss and last_bss is the bss section.
+ */
+ k = eppnt->p_memsz + eppnt->p_vaddr;
+ if (k > last_bss)
+ last_bss = k;
+ pr_debug("\n");
+ }
}
/* Now use mmap to map the library into memory. */
- if(error < 0 && error > -1024) {
+ if (error < 0 && error > -1024) {
pr_debug("got error %d\n", error);
kfree(elf_phdata);
return 0xffffffff;
@@ -377,7 +414,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
return -ENOEXEC;
/* First of all, some simple consistency checks */
- if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
+ if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
!bprm->file->f_op->mmap) {
return -ENOEXEC;
}
@@ -388,7 +425,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
* XXX all registers as 64bits on cpu's capable of this at
* XXX exception time plus frob the XTLB exception vector.
*/
- if((ehp->e_flags & EF_MIPS_ABI2))
+ if ((ehp->e_flags & EF_MIPS_ABI2))
return -ENOEXEC;
return 0;
@@ -410,7 +447,7 @@ static inline int look_for_irix_interpreter(char **name,
struct file *file = NULL;
*name = NULL;
- for(i = 0; i < pnum; i++, epp++) {
+ for (i = 0; i < pnum; i++, epp++) {
if (epp->p_type != PT_INTERP)
continue;
@@ -467,8 +504,8 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
unsigned int tmp;
int i, prot;
- for(i = 0; i < pnum; i++, epp++) {
- if(epp->p_type != PT_LOAD)
+ for (i = 0; i < pnum; i++, epp++) {
+ if (epp->p_type != PT_LOAD)
continue;
/* Map it. */
@@ -483,23 +520,23 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
up_write(&current->mm->mmap_sem);
/* Fixup location tracking vars. */
- if((epp->p_vaddr & 0xfffff000) < *estack)
+ if ((epp->p_vaddr & 0xfffff000) < *estack)
*estack = (epp->p_vaddr & 0xfffff000);
- if(!*laddr)
+ if (!*laddr)
*laddr = epp->p_vaddr - epp->p_offset;
- if(epp->p_vaddr < *scode)
+ if (epp->p_vaddr < *scode)
*scode = epp->p_vaddr;
tmp = epp->p_vaddr + epp->p_filesz;
- if(tmp > *ebss)
+ if (tmp > *ebss)
*ebss = tmp;
- if((epp->p_flags & PF_X) && *ecode < tmp)
+ if ((epp->p_flags & PF_X) && *ecode < tmp)
*ecode = tmp;
- if(*edata < tmp)
+ if (*edata < tmp)
*edata = tmp;
tmp = epp->p_vaddr + epp->p_memsz;
- if(tmp > *ebrk)
+ if (tmp > *ebrk)
*ebrk = tmp;
}
@@ -513,12 +550,12 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
int i;
*eentry = 0xffffffff;
- for(i = 0; i < pnum; i++, epp++) {
- if(epp->p_type != PT_INTERP)
+ for (i = 0; i < pnum; i++, epp++) {
+ if (epp->p_type != PT_INTERP)
continue;
/* We should have fielded this error elsewhere... */
- if(*eentry != 0xffffffff)
+ if (*eentry != 0xffffffff)
return -1;
set_fs(old_fs);
@@ -604,9 +641,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (elf_ex.e_shnum > 20)
goto out;
-#ifdef DEBUG
print_elfhdr(&elf_ex);
-#endif
/* Now read in all of the header information */
size = elf_ex.e_phentsize * elf_ex.e_phnum;
@@ -622,13 +657,11 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (retval < 0)
goto out_free_ph;
-#ifdef DEBUG
dump_phdrs(elf_phdata, elf_ex.e_phnum);
-#endif
/* Set some things for later. */
- for(i = 0; i < elf_ex.e_phnum; i++) {
- switch(elf_phdata[i].p_type) {
+ for (i = 0; i < elf_ex.e_phnum; i++) {
+ switch (elf_phdata[i].p_type) {
case PT_INTERP:
has_interp = 1;
elf_ihdr = &elf_phdata[i];
@@ -667,7 +700,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (elf_interpreter) {
retval = verify_irix_interpreter(&interp_elf_ex);
- if(retval)
+ if (retval)
goto out_free_interp;
}
@@ -706,12 +739,12 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
&load_addr, &start_code, &elf_bss, &end_code,
&end_data, &elf_brk);
- if(elf_interpreter) {
+ if (elf_interpreter) {
retval = map_interpreter(elf_phdata, &interp_elf_ex,
interpreter, &interp_load_addr,
elf_ex.e_phnum, old_fs, &elf_entry);
kfree(elf_interpreter);
- if(retval) {
+ if (retval) {
set_fs(old_fs);
printk("Unable to load IRIX ELF interpreter\n");
send_sig(SIGSEGV, current, 0);
@@ -809,12 +842,12 @@ static int load_irix_library(struct file *file)
return -ENOEXEC;
/* First of all, some simple consistency checks. */
- if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
+ if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
!file->f_op->mmap)
return -ENOEXEC;
/* Now read in all of the header information. */
- if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
+ if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
return -ENOEXEC;
elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
@@ -825,15 +858,15 @@ static int load_irix_library(struct file *file)
sizeof(struct elf_phdr) * elf_ex.e_phnum);
j = 0;
- for(i=0; i<elf_ex.e_phnum; i++)
- if((elf_phdata + i)->p_type == PT_LOAD) j++;
+ for (i=0; i<elf_ex.e_phnum; i++)
+ if ((elf_phdata + i)->p_type == PT_LOAD) j++;
- if(j != 1) {
+ if (j != 1) {
kfree(elf_phdata);
return -ENOEXEC;
}
- while(elf_phdata->p_type != PT_LOAD) elf_phdata++;
+ while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
/* Now use mmap to map the library into memory. */
down_write(&current->mm->mmap_sem);
@@ -889,9 +922,7 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
return -EFAULT;
}
-#ifdef DEBUG
dump_phdrs(user_phdrp, cnt);
-#endif
for (i = 0; i < cnt; i++, hp++) {
if (__get_user(type, &hp->p_type))
@@ -905,14 +936,14 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
filp = fget(fd);
if (!filp)
return -EACCES;
- if(!filp->f_op) {
+ if (!filp->f_op) {
printk("irix_mapelf: Bogon filp!\n");
fput(filp);
return -EACCES;
}
hp = user_phdrp;
- for(i = 0; i < cnt; i++, hp++) {
+ for (i = 0; i < cnt; i++, hp++) {
int prot;
retval = __get_user(vaddr, &hp->p_vaddr);
@@ -1015,8 +1046,6 @@ static int notesize(struct memelfnote *en)
return sz;
}
-/* #define DEBUG */
-
#define DUMP_WRITE(addr, nr) \
if (!dump_write(file, (addr), (nr))) \
goto end_coredump;
@@ -1093,9 +1122,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
segs++;
}
-#ifdef DEBUG
- printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
-#endif
+ pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);
/* Set up header. */
memcpy(elf.e_ident, ELFMAG, SELFMAG);
@@ -1221,7 +1248,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
struct elf_phdr phdr;
int sz = 0;
- for(i = 0; i < numnote; i++)
+ for (i = 0; i < numnote; i++)
sz += notesize(&notes[i]);
phdr.p_type = PT_NOTE;
@@ -1241,7 +1268,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
dataoff = offset = roundup(offset, PAGE_SIZE);
/* Write program headers for segments dump. */
- for(vma = current->mm->mmap, i = 0;
+ for (vma = current->mm->mmap, i = 0;
i < segs && vma != NULL; vma = vma->vm_next) {
struct elf_phdr phdr;
size_t sz;
@@ -1267,7 +1294,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
DUMP_WRITE(&phdr, sizeof(phdr));
}
- for(i = 0; i < numnote; i++)
+ for (i = 0; i < numnote; i++)
if (!writenote(&notes[i], file))
goto end_coredump;
@@ -1275,7 +1302,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
DUMP_SEEK(dataoff);
- for(i = 0, vma = current->mm->mmap;
+ for (i = 0, vma = current->mm->mmap;
i < segs && vma != NULL;
vma = vma->vm_next) {
unsigned long addr = vma->vm_start;
@@ -1284,9 +1311,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
if (!maydump(vma))
continue;
i++;
-#ifdef DEBUG
- printk("elf_core_dump: writing %08lx %lx\n", addr, len);
-#endif
+ pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);
DUMP_WRITE((void __user *)addr, len);
}
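
The irixelf.c cleanup swaps printk()-under-#ifdef-DEBUG for pr_debug(), so the dump helpers stay compiled while the `#undef DEBUG` now at the top of the file silences them. A user-space analogue of that switch, assuming the pre-dynamic-debug behaviour where pr_debug() expands to a no-op without DEBUG:

/*
 * User-space analogue of the DEBUG/pr_debug() switch relied on above.
 * Assumption: in kernels of this vintage pr_debug() compiles to nothing
 * when DEBUG is not defined.
 */
#include <stdio.h>

#undef DEBUG			/* flip to "#define DEBUG 1" to see the output */

#ifdef DEBUG
#define pr_debug(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...)	do { } while (0)
#endif

int main(void)
{
	pr_debug("PHDR[%d]: p_vaddr[%08lx]\n", 0, 0x00400000UL);
	printf("helpers stay compiled; output appears only with DEBUG\n");
	return 0;
}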
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index bcaad6696082..2967537221e2 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -112,7 +112,7 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
}
struct irq_chip msc_levelirq_type = {
- .typename = "SOC-it-Level",
+ .name = "SOC-it-Level",
.ack = level_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
.mask_ack = level_mask_and_ack_msc_irq,
@@ -122,7 +122,7 @@ struct irq_chip msc_levelirq_type = {
};
struct irq_chip msc_edgeirq_type = {
- .typename = "SOC-it-Edge",
+ .name = "SOC-it-Edge",
.ack = edge_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
.mask_ack = edge_mask_and_ack_msc_irq,
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index efbd219845b5..3dd561832e4c 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -23,13 +23,13 @@ static unsigned int irq_base;
static inline int ls1bit32(unsigned int x)
{
- int b = 31, s;
+ int b = 31, s;
- s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
- s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
- s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
- s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
- s = 1; if (x << 1 == 0) s = 0; b -= s;
+ s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
+ s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
+ s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
+ s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
+ s = 1; if (x << 1 == 0) s = 0; b -= s;
return b;
}
@@ -92,7 +92,7 @@ void ll_mv64340_irq(void)
}
struct irq_chip mv64340_irq_type = {
- .typename = "MV-64340",
+ .name = "MV-64340",
.ack = mask_mv64340_irq,
.mask = mask_mv64340_irq,
.mask_ack = mask_mv64340_irq,
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 123324ba8c14..250732883488 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -17,28 +17,27 @@
#include <asm/mipsregs.h>
#include <asm/system.h>
-static int irq_base;
-
static inline void unmask_rm7k_irq(unsigned int irq)
{
- set_c0_intcontrol(0x100 << (irq - irq_base));
+ set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
}
static inline void mask_rm7k_irq(unsigned int irq)
{
- clear_c0_intcontrol(0x100 << (irq - irq_base));
+ clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
}
static struct irq_chip rm7k_irq_controller = {
- .typename = "RM7000",
+ .name = "RM7000",
.ack = mask_rm7k_irq,
.mask = mask_rm7k_irq,
.mask_ack = mask_rm7k_irq,
.unmask = unmask_rm7k_irq,
};
-void __init rm7k_cpu_irq_init(int base)
+void __init rm7k_cpu_irq_init(void)
{
+ int base = RM7K_CPU_IRQ_BASE;
int i;
clear_c0_intcontrol(0x00000f00); /* Mask all */
@@ -46,6 +45,4 @@ void __init rm7k_cpu_irq_init(int base)
for (i = base; i < base + 4; i++)
set_irq_chip_and_handler(i, &rm7k_irq_controller,
handle_level_irq);
-
- irq_base = base;
}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 0e6f4c5349d2..ae83d2df6f31 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -18,16 +18,14 @@
#include <asm/mipsregs.h>
#include <asm/system.h>
-static int irq_base;
-
static inline void unmask_rm9k_irq(unsigned int irq)
{
- set_c0_intcontrol(0x1000 << (irq - irq_base));
+ set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
}
static inline void mask_rm9k_irq(unsigned int irq)
{
- clear_c0_intcontrol(0x1000 << (irq - irq_base));
+ clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
}
static inline void rm9k_cpu_irq_enable(unsigned int irq)
@@ -39,15 +37,6 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
local_irq_restore(flags);
}
-static void rm9k_cpu_irq_disable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mask_rm9k_irq(irq);
- local_irq_restore(flags);
-}
-
/*
* Performance counter interrupts are global on all processors.
*/
@@ -81,7 +70,7 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
}
static struct irq_chip rm9k_irq_controller = {
- .typename = "RM9000",
+ .name = "RM9000",
.ack = mask_rm9k_irq,
.mask = mask_rm9k_irq,
.mask_ack = mask_rm9k_irq,
@@ -89,7 +78,7 @@ static struct irq_chip rm9k_irq_controller = {
};
static struct irq_chip rm9k_perfcounter_irq = {
- .typename = "RM9000",
+ .name = "RM9000",
.startup = rm9k_perfcounter_irq_startup,
.shutdown = rm9k_perfcounter_irq_shutdown,
.ack = mask_rm9k_irq,
@@ -102,8 +91,9 @@ unsigned int rm9000_perfcount_irq;
EXPORT_SYMBOL(rm9000_perfcount_irq);
-void __init rm9k_cpu_irq_init(int base)
+void __init rm9k_cpu_irq_init(void)
{
+ int base = RM9K_CPU_IRQ_BASE;
int i;
clear_c0_intcontrol(0x0000f000); /* Mask all */
@@ -115,6 +105,4 @@ void __init rm9k_cpu_irq_init(int base)
rm9000_perfcount_irq = base + 1;
set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
handle_level_irq);
-
- irq_base = base;
}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index fcc86b96ccf6..7b66e03b5899 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -25,7 +25,7 @@
* Don't even think about using this on SMP. You have been warned.
*
* This file exports one global function:
- * void mips_cpu_irq_init(int irq_base);
+ * void mips_cpu_irq_init(void);
*/
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -36,22 +36,20 @@
#include <asm/mipsmtregs.h>
#include <asm/system.h>
-static int mips_cpu_irq_base;
-
static inline void unmask_mips_irq(unsigned int irq)
{
- set_c0_status(0x100 << (irq - mips_cpu_irq_base));
+ set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
irq_enable_hazard();
}
static inline void mask_mips_irq(unsigned int irq)
{
- clear_c0_status(0x100 << (irq - mips_cpu_irq_base));
+ clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
irq_disable_hazard();
}
static struct irq_chip mips_cpu_irq_controller = {
- .typename = "MIPS",
+ .name = "MIPS",
.ack = mask_mips_irq,
.mask = mask_mips_irq,
.mask_ack = mask_mips_irq,
@@ -70,7 +68,7 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
+ clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
unmask_mips_mt_irq(irq);
@@ -84,13 +82,13 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
+ clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
mask_mips_mt_irq(irq);
}
static struct irq_chip mips_mt_cpu_irq_controller = {
- .typename = "MIPS",
+ .name = "MIPS",
.startup = mips_mt_cpu_irq_startup,
.ack = mips_mt_cpu_irq_ack,
.mask = mask_mips_mt_irq,
@@ -99,8 +97,9 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
.eoi = unmask_mips_mt_irq,
};
-void __init mips_cpu_irq_init(int irq_base)
+void __init mips_cpu_irq_init(void)
{
+ int irq_base = MIPS_CPU_IRQ_BASE;
int i;
/* Mask interrupts. */
@@ -118,6 +117,4 @@ void __init mips_cpu_irq_init(int irq_base)
for (i = irq_base + 2; i < irq_base + 8; i++)
set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
handle_level_irq);
-
- mips_cpu_irq_base = irq_base;
}
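
irq-rm7000.c, irq-rm9000.c and irq_cpu.c all drop their runtime irq_base variables in favour of the fixed RM7K_CPU_IRQ_BASE/RM9K_CPU_IRQ_BASE/MIPS_CPU_IRQ_BASE constants, so masking reduces to shifting a bit by (irq - BASE) into the relevant CP0 register. A condensed user-space model of that arithmetic, with the base assumed to be 0 and a plain variable standing in for CP0 Status:

/*
 * Condensed model of the fixed-base CPU interrupt masking used above.
 * c0_status stands in for the CP0 Status register; MIPS_CPU_IRQ_BASE
 * is assumed to be 0 for the example.
 */
#include <stdio.h>

#define MIPS_CPU_IRQ_BASE 0

static unsigned int c0_status;

static void unmask_mips_irq(unsigned int irq)
{
	c0_status |= 0x100 << (irq - MIPS_CPU_IRQ_BASE);	/* set IM bit */
}

static void mask_mips_irq(unsigned int irq)
{
	c0_status &= ~(0x100 << (irq - MIPS_CPU_IRQ_BASE));	/* clear IM bit */
}

int main(void)
{
	unmask_mips_irq(MIPS_CPU_IRQ_BASE + 7);		/* CPU timer line */
	printf("status after unmask: 0x%04x\n", c0_status);
	mask_mips_irq(MIPS_CPU_IRQ_BASE + 7);
	printf("status after mask:   0x%04x\n", c0_status);
	return 0;
}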
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 5929f883e46b..c6580018c94b 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/fs.h>
@@ -70,6 +71,7 @@ static int sp_stopping = 0;
#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7)
#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8)
#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9)
+#define MTSP_SYSCALL_IOCTL (MTSP_SYSCALL_BASE + 10)
#define MTSP_O_RDONLY 0x0000
#define MTSP_O_WRONLY 0x0001
@@ -110,7 +112,8 @@ struct apsp_table syscall_command_table[] = {
{ MTSP_SYSCALL_CLOSE, __NR_close },
{ MTSP_SYSCALL_READ, __NR_read },
{ MTSP_SYSCALL_WRITE, __NR_write },
- { MTSP_SYSCALL_LSEEK32, __NR_lseek }
+ { MTSP_SYSCALL_LSEEK32, __NR_lseek },
+ { MTSP_SYSCALL_IOCTL, __NR_ioctl }
};
static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
@@ -189,17 +192,22 @@ void sp_work_handle_request(void)
struct mtsp_syscall_generic generic;
struct mtsp_syscall_ret ret;
struct kspd_notifications *n;
+ unsigned long written;
+ mm_segment_t old_fs;
struct timeval tv;
struct timezone tz;
int cmd;
char *vcwd;
- mm_segment_t old_fs;
int size;
ret.retval = -1;
- if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) {
+ set_fs(old_fs);
printk(KERN_ERR "Expected request but nothing to read\n");
return;
}
@@ -207,7 +215,8 @@ void sp_work_handle_request(void)
size = sc.size;
if (size) {
- if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
+ if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) {
+ set_fs(old_fs);
printk(KERN_ERR "Expected request but nothing to read\n");
return;
}
@@ -232,8 +241,6 @@ void sp_work_handle_request(void)
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
(int)&tz, 0,0)) == 0)
ret.retval = tv.tv_sec;
-
- ret.errno = errno;
break;
case MTSP_SYSCALL_EXIT:
@@ -270,7 +277,6 @@ void sp_work_handle_request(void)
if (cmd >= 0) {
ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
generic.arg2, generic.arg3);
- ret.errno = errno;
} else
printk(KERN_WARNING
"KSPD: Unknown SP syscall number %d\n", sc.cmd);
@@ -280,8 +286,11 @@ void sp_work_handle_request(void)
if (vpe_getuid(SP_VPE))
sp_setfsuidgid( 0, 0);
- if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
- < sizeof(struct mtsp_syscall_ret))
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret));
+ set_fs(old_fs);
+ if (written < sizeof(ret))
printk("KSPD: sp_work_handle_request failed to send to SP\n");
}
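
For context: rtlx_read()/rtlx_write() now take __user pointers (see the rtlx.c hunks later in this patch), so kspd brackets each call with set_fs(KERNEL_DS) when handing them kernel buffers. A minimal sketch of that pattern, assuming the 2.6-era <asm/uaccess.h> interface and the structure names from the hunk above:

#include <linux/kernel.h>
#include <asm/uaccess.h>

static void send_reply_to_sp(struct mtsp_syscall_ret *ret)
{
        mm_segment_t old_fs = get_fs();
        unsigned long written;

        /*
         * Widen the address-limit check so a kernel pointer passes the
         * __user access checks inside rtlx_write().
         */
        set_fs(KERNEL_DS);
        written = rtlx_write(RTLX_CHANNEL_SYSIO, ret, sizeof(*ret));
        set_fs(old_fs);

        if (written < sizeof(*ret))
                printk(KERN_ERR "KSPD: failed to send reply to SP\n");
}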
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index de3fae260ff8..37849edd0645 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -39,6 +39,7 @@
#include <net/sock.h>
#include <net/scm.h>
+#include <asm/compat-signal.h>
#include <asm/ipc.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
@@ -165,78 +166,6 @@ out:
return error;
}
-asmlinkage long
-sysn32_waitid(int which, compat_pid_t pid,
- siginfo_t __user *uinfo, int options,
- struct compat_rusage __user *uru)
-{
- struct rusage ru;
- long ret;
- mm_segment_t old_fs = get_fs();
- int si_signo;
-
- if (!access_ok(VERIFY_WRITE, uinfo, sizeof(*uinfo)))
- return -EFAULT;
-
- set_fs (KERNEL_DS);
- ret = sys_waitid(which, pid, uinfo, options,
- uru ? (struct rusage __user *) &ru : NULL);
- set_fs (old_fs);
-
- if (__get_user(si_signo, &uinfo->si_signo))
- return -EFAULT;
- if (ret < 0 || si_signo == 0)
- return ret;
-
- if (uru)
- ret = put_compat_rusage(&ru, uru);
- return ret;
-}
-
-struct sysinfo32 {
- s32 uptime;
- u32 loads[3];
- u32 totalram;
- u32 freeram;
- u32 sharedram;
- u32 bufferram;
- u32 totalswap;
- u32 freeswap;
- u16 procs;
- u32 totalhigh;
- u32 freehigh;
- u32 mem_unit;
- char _f[8];
-};
-
-asmlinkage int sys32_sysinfo(struct sysinfo32 __user *info)
-{
- struct sysinfo s;
- int ret, err;
- mm_segment_t old_fs = get_fs ();
-
- set_fs (KERNEL_DS);
- ret = sys_sysinfo((struct sysinfo __user *)&s);
- set_fs (old_fs);
- err = put_user (s.uptime, &info->uptime);
- err |= __put_user (s.loads[0], &info->loads[0]);
- err |= __put_user (s.loads[1], &info->loads[1]);
- err |= __put_user (s.loads[2], &info->loads[2]);
- err |= __put_user (s.totalram, &info->totalram);
- err |= __put_user (s.freeram, &info->freeram);
- err |= __put_user (s.sharedram, &info->sharedram);
- err |= __put_user (s.bufferram, &info->bufferram);
- err |= __put_user (s.totalswap, &info->totalswap);
- err |= __put_user (s.freeswap, &info->freeswap);
- err |= __put_user (s.procs, &info->procs);
- err |= __put_user (s.totalhigh, &info->totalhigh);
- err |= __put_user (s.freehigh, &info->freehigh);
- err |= __put_user (s.mem_unit, &info->mem_unit);
- if (err)
- return -EFAULT;
- return ret;
-}
-
#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
@@ -382,6 +311,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
return ret;
}
+#ifdef CONFIG_SYSVIPC
+
asmlinkage long
sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
@@ -439,6 +370,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
return err;
}
+#else
+
+asmlinkage long
+sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_SYSVIPC */
+
#ifdef CONFIG_MIPS32_N32
asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
{
@@ -558,7 +499,7 @@ extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
{
int err;
- struct ustat tmp;
+ struct ustat tmp;
struct ustat32 tmp32;
mm_segment_t old_fs = get_fs();
@@ -569,11 +510,11 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
if (err)
goto out;
- memset(&tmp32,0,sizeof(struct ustat32));
- tmp32.f_tfree = tmp.f_tfree;
- tmp32.f_tinode = tmp.f_tinode;
+ memset(&tmp32,0,sizeof(struct ustat32));
+ tmp32.f_tfree = tmp.f_tfree;
+ tmp32.f_tinode = tmp.f_tinode;
- err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
+ err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
out:
return err;
@@ -615,151 +556,6 @@ asmlinkage long sys32_sync_file_range(int fd, int __pad,
flags);
}
-/* Argument list sizes for sys_socketcall */
-#define AL(x) ((x) * sizeof(unsigned int))
-static unsigned char socketcall_nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
- AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
- AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
-#undef AL
-
-/*
- * System call vectors.
- *
- * Argument checking cleaned up. Saved 20% in size.
- * This function doesn't need to set the kernel lock because
- * it is set by the callees.
- */
-
-asmlinkage long sys32_socketcall(int call, unsigned int __user *args32)
-{
- unsigned int a[6];
- unsigned int a0,a1;
- int err;
-
- extern asmlinkage long sys_socket(int family, int type, int protocol);
- extern asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
- extern asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen);
- extern asmlinkage long sys_listen(int fd, int backlog);
- extern asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen);
- extern asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
- extern asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr, int __user *usockaddr_len);
- extern asmlinkage long sys_socketpair(int family, int type, int protocol, int __user *usockvec);
- extern asmlinkage long sys_send(int fd, void __user * buff, size_t len, unsigned flags);
- extern asmlinkage long sys_sendto(int fd, void __user * buff, size_t len, unsigned flags,
- struct sockaddr __user *addr, int addr_len);
- extern asmlinkage long sys_recv(int fd, void __user * ubuf, size_t size, unsigned flags);
- extern asmlinkage long sys_recvfrom(int fd, void __user * ubuf, size_t size, unsigned flags,
- struct sockaddr __user *addr, int __user *addr_len);
- extern asmlinkage long sys_shutdown(int fd, int how);
- extern asmlinkage long sys_setsockopt(int fd, int level, int optname, char __user *optval, int optlen);
- extern asmlinkage long sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen);
- extern asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
- extern asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned int flags);
-
-
- if(call<1||call>SYS_RECVMSG)
- return -EINVAL;
-
- /* copy_from_user should be SMP safe. */
- if (copy_from_user(a, args32, socketcall_nargs[call]))
- return -EFAULT;
-
- a0=a[0];
- a1=a[1];
-
- switch(call)
- {
- case SYS_SOCKET:
- err = sys_socket(a0,a1,a[2]);
- break;
- case SYS_BIND:
- err = sys_bind(a0,(struct sockaddr __user *)A(a1), a[2]);
- break;
- case SYS_CONNECT:
- err = sys_connect(a0, (struct sockaddr __user *)A(a1), a[2]);
- break;
- case SYS_LISTEN:
- err = sys_listen(a0,a1);
- break;
- case SYS_ACCEPT:
- err = sys_accept(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_GETSOCKNAME:
- err = sys_getsockname(a0,(struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_GETPEERNAME:
- err = sys_getpeername(a0, (struct sockaddr __user *)A(a1), (int __user *)A(a[2]));
- break;
- case SYS_SOCKETPAIR:
- err = sys_socketpair(a0,a1, a[2], (int __user *)A(a[3]));
- break;
- case SYS_SEND:
- err = sys_send(a0, (void __user *)A(a1), a[2], a[3]);
- break;
- case SYS_SENDTO:
- err = sys_sendto(a0,(void __user *)A(a1), a[2], a[3],
- (struct sockaddr __user *)A(a[4]), a[5]);
- break;
- case SYS_RECV:
- err = sys_recv(a0, (void __user *)A(a1), a[2], a[3]);
- break;
- case SYS_RECVFROM:
- err = sys_recvfrom(a0, (void __user *)A(a1), a[2], a[3],
- (struct sockaddr __user *)A(a[4]), (int __user *)A(a[5]));
- break;
- case SYS_SHUTDOWN:
- err = sys_shutdown(a0,a1);
- break;
- case SYS_SETSOCKOPT:
- err = sys_setsockopt(a0, a1, a[2], (char __user *)A(a[3]), a[4]);
- break;
- case SYS_GETSOCKOPT:
- err = sys_getsockopt(a0, a1, a[2], (char __user *)A(a[3]), (int __user *)A(a[4]));
- break;
- case SYS_SENDMSG:
- err = sys_sendmsg(a0, (struct msghdr __user *) A(a1), a[2]);
- break;
- case SYS_RECVMSG:
- err = sys_recvmsg(a0, (struct msghdr __user *) A(a1), a[2]);
- break;
- default:
- err = -EINVAL;
- break;
- }
- return err;
-}
-
-struct sigevent32 {
- u32 sigev_value;
- u32 sigev_signo;
- u32 sigev_notify;
- u32 payload[(64 / 4) - 3];
-};
-
-extern asmlinkage long
-sys_timer_create(clockid_t which_clock,
- struct sigevent __user *timer_event_spec,
- timer_t __user * created_timer_id);
-
-long
-sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id)
-{
- struct sigevent __user *p = NULL;
- if (se32) {
- struct sigevent se;
- p = compat_alloc_user_space(sizeof(struct sigevent));
- memset(&se, 0, sizeof(struct sigevent));
- if (get_user(se.sigev_value.sival_int, &se32->sigev_value) ||
- __get_user(se.sigev_signo, &se32->sigev_signo) ||
- __get_user(se.sigev_notify, &se32->sigev_notify) ||
- __copy_from_user(&se._sigev_un._pad, &se32->payload,
- sizeof(se32->payload)) ||
- copy_to_user(p, &se, sizeof(se)))
- return -EFAULT;
- }
- return sys_timer_create(clock, p, timer_id);
-}
-
save_static_function(sys32_clone);
__attribute_used__ noinline static int
_sys32_clone(nabi_no_regargs struct pt_regs regs)
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index e0ad754c7edd..8f42fa85ac9e 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -13,8 +13,8 @@
#include <asm/cacheflush.h>
#include <asm/page.h>
-const extern unsigned char relocate_new_kernel[];
-const extern unsigned int relocate_new_kernel_size;
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned int relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c1373a6e668b..ba01800b6018 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -3,9 +3,11 @@
* Copyright (C) 2005 Mips Technologies, Inc
*/
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/security.h>
@@ -96,6 +98,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
goto out_unlock;
}
+ retval = security_task_setscheduler(p, 0, NULL);
+ if (retval)
+ goto out_unlock;
+
/* Record new user-specified CPU set for future reference */
p->thread.user_cpus_allowed = new_mask;
@@ -141,8 +147,9 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
-
- retval = 0;
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
@@ -448,3 +455,20 @@ void mt_cflush_release(void)
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
+
+struct class *mt_class;
+
+static int __init mt_init(void)
+{
+ struct class *mtc;
+
+ mtc = class_create(THIS_MODULE, "mt");
+ if (IS_ERR(mtc))
+ return PTR_ERR(mtc);
+
+ mt_class = mtc;
+
+ return 0;
+}
+
+subsys_initcall(mt_init);
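
mt_init() above registers a generic "mt" device class at subsys_initcall time; MT drivers such as rtlx (below) then hang their device nodes off it. A sketch of that usage, assuming the 2.6.2x-era device_create() signature; the function and its arguments are illustrative only:

#include <linux/device.h>
#include <linux/err.h>

extern struct class *mt_class;          /* created by mt_init() above */

static int example_create_nodes(int major, int nchannels, const char *name)
{
        int i;

        for (i = 0; i < nchannels; i++) {
                struct device *dev = device_create(mt_class, NULL,
                                                   MKDEV(major, i),
                                                   "%s%d", name, i);
                if (IS_ERR(dev))
                        return PTR_ERR(dev);    /* caller unwinds the rest */
        }
        return 0;
}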
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 2ef857c3ee53..225755d0c1f6 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(kernel_thread);
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__copy_user_inatomic);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4ed37ba19731..5ddc2e9deecf 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -31,13 +31,13 @@ static const char *cpu_name[] = {
[CPU_R4000PC] = "R4000PC",
[CPU_R4000SC] = "R4000SC",
[CPU_R4000MC] = "R4000MC",
- [CPU_R4200] = "R4200",
+ [CPU_R4200] = "R4200",
[CPU_R4400PC] = "R4400PC",
[CPU_R4400SC] = "R4400SC",
[CPU_R4400MC] = "R4400MC",
[CPU_R4600] = "R4600",
[CPU_R6000] = "R6000",
- [CPU_R6000A] = "R6000A",
+ [CPU_R6000A] = "R6000A",
[CPU_R8000] = "R8000",
[CPU_R10000] = "R10000",
[CPU_R12000] = "R12000",
@@ -46,14 +46,14 @@ static const char *cpu_name[] = {
[CPU_R4650] = "R4650",
[CPU_R4700] = "R4700",
[CPU_R5000] = "R5000",
- [CPU_R5000A] = "R5000A",
+ [CPU_R5000A] = "R5000A",
[CPU_R4640] = "R4640",
[CPU_NEVADA] = "Nevada",
[CPU_RM7000] = "RM7000",
[CPU_RM9000] = "RM9000",
[CPU_R5432] = "R5432",
[CPU_4KC] = "MIPS 4Kc",
- [CPU_5KC] = "MIPS 5Kc",
+ [CPU_5KC] = "MIPS 5Kc",
[CPU_R4310] = "R4310",
[CPU_SB1] = "SiByte SB1",
[CPU_SB1A] = "SiByte SB1A",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ec8209f3a0c6..6bdfb5a9fa1a 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -26,7 +26,6 @@
#include <linux/completion.h>
#include <linux/kallsyms.h>
-#include <asm/abi.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
@@ -41,10 +40,6 @@
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-extern void smtc_idle_loop_hook(void);
-#endif /* CONFIG_MIPS_MT_SMTC */
/*
* The idle thread. There's no useful work to be done, so just try to conserve
@@ -56,9 +51,11 @@ ATTRIB_NORET void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched()) {
-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+ extern void smtc_idle_loop_hook(void);
+
smtc_idle_loop_hook();
-#endif /* CONFIG_MIPS_MT_SMTC */
+#endif
if (cpu_wait)
(*cpu_wait)();
}
@@ -68,38 +65,6 @@ ATTRIB_NORET void cpu_idle(void)
}
}
-/*
- * Native o32 and N64 ABI without DSP ASE
- */
-struct mips_abi mips_abi = {
- .do_signal = do_signal,
-#ifdef CONFIG_TRAD_SIGNALS
- .setup_frame = setup_frame,
-#endif
- .setup_rt_frame = setup_rt_frame
-};
-
-#ifdef CONFIG_MIPS32_O32
-/*
- * o32 compatibility on 64-bit kernels, without DSP ASE
- */
-struct mips_abi mips_abi_32 = {
- .do_signal = do_signal32,
- .setup_frame = setup_frame_32,
- .setup_rt_frame = setup_rt_frame_32
-};
-#endif /* CONFIG_MIPS32_O32 */
-
-#ifdef CONFIG_MIPS32_N32
-/*
- * N32 on 64-bit kernels, without DSP ASE
- */
-struct mips_abi mips_abi_n32 = {
- .do_signal = do_signal,
- .setup_rt_frame = setup_rt_frame_n32
-};
-#endif /* CONFIG_MIPS32_N32 */
-
asmlinkage void ret_from_fork(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
@@ -248,7 +213,7 @@ int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
/*
* Create a kernel thread
*/
-ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
+static ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
{
do_exit(fn(arg));
}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 258d74fd0b63..201ae194d1b8 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -236,6 +236,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case MMLO:
tmp = regs->lo;
break;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ case ACX:
+ tmp = regs->acx;
+ break;
+#endif
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
@@ -362,6 +367,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case MMLO:
regs->lo = data;
break;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ case ACX:
+ regs->acx = data;
+ break;
+#endif
case FPC_CSR:
child->thread.fpu.fcr31 = data;
break;
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 880fa6e841ee..dbd42adc52ed 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -177,9 +177,10 @@ LEAF(_restore_fp_context32)
jr ra
li v0, 0 # success
END(_restore_fp_context32)
- .set reorder
#endif
+ .set reorder
+
.type fault@function
.ent fault
fault: li v0, -EFAULT # failure
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 5a99e3e0c96d..bfc8ca168f83 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
@@ -34,6 +35,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
#include <asm/cacheflush.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
@@ -51,7 +53,8 @@ static char module_name[] = "rtlx";
static struct chan_waitqueues {
wait_queue_head_t rt_queue;
wait_queue_head_t lx_queue;
- int in_open;
+ atomic_t in_open;
+ struct mutex mutex;
} channel_wqs[RTLX_CHANNELS];
static struct irqaction irq;
@@ -63,7 +66,7 @@ extern void *vpe_get_shared(int index);
static void rtlx_dispatch(void)
{
- do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ);
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
}
@@ -144,110 +147,91 @@ static void stopping(int vpe)
int rtlx_open(int index, int can_sleep)
{
- int ret;
+ struct rtlx_info **p;
struct rtlx_channel *chan;
- volatile struct rtlx_info **p;
+ enum rtlx_state state;
+ int ret = 0;
if (index >= RTLX_CHANNELS) {
printk(KERN_DEBUG "rtlx_open index out of range\n");
return -ENOSYS;
}
- if (channel_wqs[index].in_open) {
- printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
- return -EBUSY;
+ if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
+ printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
+ index);
+ ret = -EBUSY;
+ goto out_fail;
}
- channel_wqs[index].in_open++;
-
if (rtlx == NULL) {
if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
if (can_sleep) {
- DECLARE_WAITQUEUE(wait, current);
-
- /* go to sleep */
- add_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- /* back running */
+ __wait_event_interruptible(channel_wqs[index].lx_queue,
+ (p = vpe_get_shared(RTLX_TARG_VPE)),
+ ret);
+ if (ret)
+ goto out_fail;
} else {
- printk( KERN_DEBUG "No SP program loaded, and device "
+ printk(KERN_DEBUG "No SP program loaded, and device "
"opened with O_NONBLOCK\n");
- channel_wqs[index].in_open = 0;
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out_fail;
}
}
+ smp_rmb();
if (*p == NULL) {
if (can_sleep) {
- DECLARE_WAITQUEUE(wait, current);
-
- /* go to sleep */
- add_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while (*p == NULL) {
- schedule();
-
- /* reset task state to interruptable otherwise
- we'll whizz round here like a very fast loopy
- thing. schedule() appears to return with state
- set to TASK_RUNNING.
-
- If the loaded SP program, for whatever reason,
- doesn't set up the shared structure *p will never
- become true. So whoever connected to either /dev/rt?
- or if it was kspd, will then take up rather a lot of
- processor cycles.
- */
-
- set_current_state(TASK_INTERRUPTIBLE);
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE);
+ smp_rmb();
+ if (*p != NULL)
+ break;
+ if (!signal_pending(current)) {
+ schedule();
+ continue;
+ }
+ ret = -ERESTARTSYS;
+ goto out_fail;
}
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- /* back running */
- }
- else {
+ finish_wait(&channel_wqs[index].lx_queue, &wait);
+ } else {
printk(" *vpe_get_shared is NULL. "
"Has an SP program been loaded?\n");
- channel_wqs[index].in_open = 0;
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out_fail;
}
}
if ((unsigned int)*p < KSEG0) {
printk(KERN_WARNING "vpe_get_shared returned an invalid pointer "
"maybe an error code %d\n", (int)*p);
- channel_wqs[index].in_open = 0;
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out_fail;
}
- if ((ret = rtlx_init(*p)) < 0) {
- channel_wqs[index].in_open = 0;
- return ret;
- }
+ if ((ret = rtlx_init(*p)) < 0)
+ goto out_ret;
}
chan = &rtlx->channel[index];
- if (chan->lx_state == RTLX_STATE_OPENED) {
- channel_wqs[index].in_open = 0;
- return -EBUSY;
- }
+ state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
+ if (state == RTLX_STATE_OPENED) {
+ ret = -EBUSY;
+ goto out_fail;
+ }
- chan->lx_state = RTLX_STATE_OPENED;
- channel_wqs[index].in_open = 0;
- return 0;
+out_fail:
+ smp_mb();
+ atomic_dec(&channel_wqs[index].in_open);
+ smp_mb();
+
+out_ret:
+ return ret;
}
int rtlx_release(int index)
@@ -268,30 +252,17 @@ unsigned int rtlx_read_poll(int index, int can_sleep)
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
if (can_sleep) {
- DECLARE_WAITQUEUE(wait, current);
-
- /* go to sleep */
- add_wait_queue(&channel_wqs[index].lx_queue, &wait);
-
- set_current_state(TASK_INTERRUPTIBLE);
- while (chan->lx_read == chan->lx_write) {
- schedule();
-
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (sp_stopping) {
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
- return 0;
- }
- }
+ int ret = 0;
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
+ __wait_event_interruptible(channel_wqs[index].lx_queue,
+ chan->lx_read != chan->lx_write || sp_stopping,
+ ret);
+ if (ret)
+ return ret;
- /* back running */
- }
- else
+ if (sp_stopping)
+ return 0;
+ } else
return 0;
}
@@ -318,56 +289,53 @@ unsigned int rtlx_write_poll(int index)
return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
}
-static inline void copy_to(void *dst, void *src, size_t count, int user)
-{
- if (user)
- copy_to_user(dst, src, count);
- else
- memcpy(dst, src, count);
-}
-
-static inline void copy_from(void *dst, void *src, size_t count, int user)
-{
- if (user)
- copy_from_user(dst, src, count);
- else
- memcpy(dst, src, count);
-}
-
-ssize_t rtlx_read(int index, void *buff, size_t count, int user)
+ssize_t rtlx_read(int index, void __user *buff, size_t count)
{
- size_t fl = 0L;
+ size_t lx_write, fl = 0L;
struct rtlx_channel *lx;
+ unsigned long failed;
if (rtlx == NULL)
return -ENOSYS;
lx = &rtlx->channel[index];
+ mutex_lock(&channel_wqs[index].mutex);
+ smp_rmb();
+ lx_write = lx->lx_write;
+
/* find out how much in total */
count = min(count,
- (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
+ (size_t)(lx_write + lx->buffer_size - lx->lx_read)
% lx->buffer_size);
/* then how much from the read pointer onwards */
- fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
+ fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
- copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
+ failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
+ if (failed)
+ goto out;
/* and if there is anything left at the beginning of the buffer */
- if ( count - fl )
- copy_to (buff + fl, lx->lx_buffer, count - fl, user);
+ if (count - fl)
+ failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
+
+out:
+ count -= failed;
- /* update the index */
- lx->lx_read += count;
- lx->lx_read %= lx->buffer_size;
+ smp_wmb();
+ lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
+ smp_wmb();
+ mutex_unlock(&channel_wqs[index].mutex);
return count;
}
-ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
{
struct rtlx_channel *rt;
+ unsigned long failed;
+ size_t rt_read;
size_t fl;
if (rtlx == NULL)
@@ -375,24 +343,35 @@ ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
rt = &rtlx->channel[index];
+ mutex_lock(&channel_wqs[index].mutex);
+ smp_rmb();
+ rt_read = rt->rt_read;
+
/* total number of bytes to copy */
count = min(count,
- (size_t)write_spacefree(rt->rt_read, rt->rt_write,
- rt->buffer_size));
+ (size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size));
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
- copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
+ failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
+ if (failed)
+ goto out;
/* if there's any left copy to the beginning of the buffer */
- if( count - fl )
- copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
+ if (count - fl) {
+ failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+ }
+
+out:
+ count -= failed;
- rt->rt_write += count;
- rt->rt_write %= rt->buffer_size;
+ smp_wmb();
+ rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
+ smp_wmb();
+ mutex_unlock(&channel_wqs[index].mutex);
- return(count);
+ return count;
}
@@ -444,7 +423,7 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
return 0; // -EAGAIN makes cat whinge
}
- return rtlx_read(minor, buffer, count, 1);
+ return rtlx_read(minor, buffer, count);
}
static ssize_t file_write(struct file *file, const char __user * buffer,
@@ -452,31 +431,28 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
{
int minor;
struct rtlx_channel *rt;
- DECLARE_WAITQUEUE(wait, current);
minor = iminor(file->f_path.dentry->d_inode);
rt = &rtlx->channel[minor];
/* any space left... */
if (!rtlx_write_poll(minor)) {
+ int ret = 0;
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (!rtlx_write_poll(minor))
- schedule();
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
+ __wait_event_interruptible(channel_wqs[minor].rt_queue,
+ rtlx_write_poll(minor),
+ ret);
+ if (ret)
+ return ret;
}
- return rtlx_write(minor, (void *)buffer, count, 1);
+ return rtlx_write(minor, buffer, count);
}
-static struct file_operations rtlx_fops = {
+static const struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
.open = file_open,
.release = file_release,
@@ -491,14 +467,15 @@ static struct irqaction rtlx_irq = {
.name = "RTLX",
};
-static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
+static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
static char register_chrdev_failed[] __initdata =
KERN_ERR "rtlx_module_init: unable to register device\n";
static int rtlx_module_init(void)
{
- int i;
+ struct device *dev;
+ int i, err;
major = register_chrdev(0, module_name, &rtlx_fops);
if (major < 0) {
@@ -510,7 +487,15 @@ static int rtlx_module_init(void)
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
- channel_wqs[i].in_open = 0;
+ atomic_set(&channel_wqs[i].in_open, 0);
+ mutex_init(&channel_wqs[i].mutex);
+
+ dev = device_create(mt_class, NULL, MKDEV(major, i),
+ "%s%d", module_name, i);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
}
/* set up notifiers */
@@ -525,10 +510,21 @@ static int rtlx_module_init(void)
setup_irq(rtlx_irq_num, &rtlx_irq);
return 0;
+
+out_chrdev:
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ return err;
}
static void __exit rtlx_module_exit(void)
{
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, module_name);
}
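
The rtlx rework above replaces several open-coded add_wait_queue()/set_current_state()/schedule() loops with __wait_event_interruptible(). A minimal sketch of the same idea using the public wait_event_interruptible() macro from <linux/wait.h>; all names here are illustrative:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_queue);
static int data_ready;  /* producer sets this, then calls wake_up_interruptible(&example_queue) */

static int wait_for_data(void)
{
        /* returns 0 once data_ready is non-zero, -ERESTARTSYS on a signal */
        return wait_event_interruptible(example_queue, data_ready != 0);
}

The macro re-checks the condition after every wakeup and restores TASK_RUNNING on exit, which is exactly the bookkeeping the removed loops did by hand.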
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7c0b3936ba44..0c9a9ff8cd25 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -656,6 +656,8 @@ einval: li v0, -EINVAL
sys sys_kexec_load 4
sys sys_getcpu 3
sys sys_epoll_pwait 6
+ sys sys_ioprio_set 3
+ sys sys_ioprio_get 2
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index e569b846e9a3..23f3b118f718 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -471,3 +471,6 @@ sys_call_table:
PTR sys_kexec_load /* 5270 */
PTR sys_getcpu
PTR sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get
+ .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a7bff2a54723..6eac28337423 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -217,7 +217,7 @@ EXPORT(sysn32_call_table)
PTR sys32_gettimeofday
PTR compat_sys_getrlimit /* 6095 */
PTR compat_sys_getrusage
- PTR sys32_sysinfo
+ PTR compat_sys_sysinfo
PTR compat_sys_times
PTR sys32_ptrace
PTR sys_getuid /* 6100 */
@@ -340,7 +340,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_statfs64
PTR compat_sys_fstatfs64
PTR sys_sendfile64
- PTR sys32_timer_create /* 6220 */
+ PTR compat_sys_timer_create /* 6220 */
PTR compat_sys_timer_settime
PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun
@@ -361,7 +361,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_mq_notify
PTR compat_sys_mq_getsetattr
PTR sys_ni_syscall /* 6240, sys_vserver */
- PTR sysn32_waitid
+ PTR compat_sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
@@ -384,7 +384,7 @@ EXPORT(sysn32_call_table)
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat
- PTR sys_pselect6
+ PTR compat_sys_pselect6
PTR sys_ppoll /* 6265 */
PTR sys_unshare
PTR sys_splice
@@ -395,5 +395,8 @@ EXPORT(sysn32_call_table)
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
PTR compat_sys_kexec_load
- PTR sys_getcpu
- PTR sys_epoll_pwait
+ PTR sys_getcpu /* 6275 */
+ PTR compat_sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get
+ .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e91379c1be1d..7e74b412a782 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -307,7 +307,7 @@ sys_call_table:
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 4100 */
PTR sys_ni_syscall /* sys_ioperm */
- PTR sys32_socketcall
+ PTR compat_sys_socketcall
PTR sys_syslog
PTR compat_sys_setitimer
PTR compat_sys_getitimer /* 4105 */
@@ -321,7 +321,7 @@ sys_call_table:
PTR sys_ni_syscall /* sys_vm86 */
PTR compat_sys_wait4
PTR sys_swapoff /* 4115 */
- PTR sys32_sysinfo
+ PTR compat_sys_sysinfo
PTR sys32_ipc
PTR sys_fsync
PTR sys32_sigreturn
@@ -462,7 +462,7 @@ sys_call_table:
PTR sys_fadvise64_64
PTR compat_sys_statfs64 /* 4255 */
PTR compat_sys_fstatfs64
- PTR sys32_timer_create
+ PTR compat_sys_timer_create
PTR compat_sys_timer_settime
PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun /* 4260 */
@@ -506,7 +506,7 @@ sys_call_table:
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat /* 4300 */
- PTR sys_pselect6
+ PTR compat_sys_pselect6
PTR sys_ppoll
PTR sys_unshare
PTR sys_splice
@@ -518,5 +518,7 @@ sys_call_table:
PTR compat_sys_get_robust_list /* 4310 */
PTR compat_sys_kexec_load
PTR sys_getcpu
- PTR sys_epoll_pwait
+ PTR compat_sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get /* 4315 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 89440a0d8528..4975da0bfb63 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -271,8 +271,7 @@ static void __init bootmem_init(void)
static void __init bootmem_init(void)
{
unsigned long reserved_end;
- unsigned long highest = 0;
- unsigned long mapstart = -1UL;
+ unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
int i;
@@ -284,6 +283,13 @@ static void __init bootmem_init(void)
reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
/*
+ * max_low_pfn is not a number of pages. The number of pages
+ * of the system is given by 'max_low_pfn - min_low_pfn'.
+ */
+ min_low_pfn = ~0UL;
+ max_low_pfn = 0;
+
+ /*
* Find the highest page frame number we have available.
*/
for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -296,8 +302,10 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
- if (end > highest)
- highest = end;
+ if (end > max_low_pfn)
+ max_low_pfn = end;
+ if (start < min_low_pfn)
+ min_low_pfn = start;
if (end <= reserved_end)
continue;
if (start >= mapstart)
@@ -305,22 +313,36 @@ static void __init bootmem_init(void)
mapstart = max(reserved_end, start);
}
+ if (min_low_pfn >= max_low_pfn)
+ panic("Incorrect memory mapping !!!");
+ if (min_low_pfn > ARCH_PFN_OFFSET) {
+ printk(KERN_INFO
+ "Wasting %lu bytes for tracking %lu unused pages\n",
+ (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
+ min_low_pfn - ARCH_PFN_OFFSET);
+ } else if (min_low_pfn < ARCH_PFN_OFFSET) {
+ printk(KERN_INFO
+ "%lu free pages won't be used\n",
+ ARCH_PFN_OFFSET - min_low_pfn);
+ }
+ min_low_pfn = ARCH_PFN_OFFSET;
+
/*
* Determine low and high memory ranges
*/
- if (highest > PFN_DOWN(HIGHMEM_START)) {
+ if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
highstart_pfn = PFN_DOWN(HIGHMEM_START);
- highend_pfn = highest;
+ highend_pfn = max_low_pfn;
#endif
- highest = PFN_DOWN(HIGHMEM_START);
+ max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
/*
* Initialize the boot-time allocator with low memory only.
*/
- bootmap_size = init_bootmem(mapstart, highest);
-
+ bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
+ min_low_pfn, max_low_pfn);
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
@@ -430,7 +452,7 @@ static void __init arch_mem_init(char **cmdline_p)
print_memory_map();
strlcpy(command_line, arcs_cmdline, sizeof(command_line));
- strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
@@ -503,13 +525,21 @@ void __init setup_arch(char **cmdline_p)
{
cpu_probe();
prom_init();
+
+#ifdef CONFIG_EARLY_PRINTK
+ {
+ extern void setup_early_printk(void);
+
+ setup_early_printk();
+ }
+#endif
cpu_report();
#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
+ conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
+ conswitchp = &dummy_con;
#endif
#endif
@@ -521,7 +551,7 @@ void __init setup_arch(char **cmdline_p)
#endif
}
-int __init fpu_disable(char *s)
+static int __init fpu_disable(char *s)
{
int i;
@@ -533,7 +563,7 @@ int __init fpu_disable(char *s)
__setup("nofpu", fpu_disable);
-int __init dsp_disable(char *s)
+static int __init dsp_disable(char *s)
{
cpu_data[0].ases &= ~MIPS_ASE_DSP;
@@ -541,3 +571,6 @@ int __init dsp_disable(char *s)
}
__setup("nodsp", dsp_disable);
+
+unsigned long kernelsp[NR_CPUS];
+unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
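
The bootmem_init() changes above track both ends of the physical memory map so that init_bootmem_node() can be told the first usable pfn instead of assuming zero. A reduced sketch of that scan, with mem_range standing in for the real boot_mem_map entries:

struct mem_range { unsigned long start_pfn, end_pfn; };

static void find_pfn_limits(const struct mem_range *map, int n,
                            unsigned long *min_pfn, unsigned long *max_pfn)
{
        int i;

        *min_pfn = ~0UL;
        *max_pfn = 0;
        for (i = 0; i < n; i++) {
                if (map[i].end_pfn > *max_pfn)
                        *max_pfn = map[i].end_pfn;
                if (map[i].start_pfn < *min_pfn)
                        *min_pfn = map[i].start_pfn;
        }
        /*
         * The patch then clamps *min_pfn to ARCH_PFN_OFFSET and panics
         * if the resulting range is empty.
         */
}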
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index b1f09d54ebe6..c0faabd52010 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -8,169 +8,39 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
+#ifndef __SIGNAL_COMMON_H
+#define __SIGNAL_COMMON_H
-static inline int
-setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- int err = 0;
+/* #define DEBUG_SIG */
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+#ifdef DEBUG_SIG
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ##args)
+#else
+# define DEBUGP(fmt, args...)
+#endif
-#define save_gp_reg(i) do { \
- err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \
-} while(0)
- __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
- save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
- save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
- save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
- save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
- save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
- save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
- save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
- save_gp_reg(31);
-#undef save_gp_reg
-
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __put_user(mfhi1(), &sc->sc_hi1);
- err |= __put_user(mflo1(), &sc->sc_lo1);
- err |= __put_user(mfhi2(), &sc->sc_hi2);
- err |= __put_user(mflo2(), &sc->sc_lo2);
- err |= __put_user(mfhi3(), &sc->sc_hi3);
- err |= __put_user(mflo3(), &sc->sc_lo3);
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- }
-
- err |= __put_user(!!used_math(), &sc->sc_used_math);
-
- if (!used_math())
- goto out;
-
- /*
- * Save FPU state to signal context. Signal handler will "inherit"
- * current FPU state.
- */
- preempt_disable();
-
- if (!is_fpu_owner()) {
- own_fpu();
- restore_fp(current);
- }
- err |= save_fp_context(sc);
-
- preempt_enable();
-
-out:
- return err;
-}
-
-static inline int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- unsigned int used_math;
- unsigned long treg;
- int err = 0;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- err |= __get_user(regs->cp0_epc, &sc->sc_pc);
- err |= __get_user(regs->hi, &sc->sc_mdhi);
- err |= __get_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
- err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
- err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-
-#define restore_gp_reg(i) do { \
- err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
-} while(0)
- restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
- restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
- restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
- restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
- restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
- restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
- restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
- restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
- restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
- restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
- restore_gp_reg(31);
-#undef restore_gp_reg
-
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- preempt_disable();
-
- if (used_math()) {
- /* restore fpu context if we have used it before */
- own_fpu();
- err |= restore_fp_context(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu();
- }
-
- preempt_enable();
-
- return err;
-}
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/*
* Determine which stack to use..
*/
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
-{
- unsigned long sp;
-
- /* Default to using normal stack */
- sp = regs->regs[29];
-
- /*
- * FPU emulator may have it's own trampoline active just
- * above the user stack, 16-bytes before the next lowest
- * 16 byte boundary. Try to avoid trashing it.
- */
- sp -= 32;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
- sp = current->sas_ss_sp + current->sas_ss_size;
-
- return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
-}
-
-static inline int install_sigtramp(unsigned int __user *tramp,
- unsigned int syscall)
-{
- int err;
+extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size);
+/*
+ * install trampoline code to get back from the sig handler
+ */
+extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
- /*
- * Set up the return code ...
- *
- * li v0, __NR__foo_sigreturn
- * syscall
- */
+/* Check and clear pending FPU exceptions in saved CSR */
+extern int fpcsr_pending(unsigned int __user *fpcsr);
- err = __put_user(0x24020000 + syscall, tramp + 0);
- err |= __put_user(0x0000000c , tramp + 1);
- if (ICACHE_REFILLS_WORKAROUND_WAR) {
- err |= __put_user(0, tramp + 2);
- err |= __put_user(0, tramp + 3);
- err |= __put_user(0, tramp + 4);
- err |= __put_user(0, tramp + 5);
- err |= __put_user(0, tramp + 6);
- err |= __put_user(0, tramp + 7);
- }
- flush_cache_sigtramp((unsigned long) tramp);
+/* Make sure we will not lose FPU ownership */
+#ifdef CONFIG_PREEMPT
+#define lock_fpu_owner() preempt_disable()
+#define unlock_fpu_owner() preempt_enable()
+#else
+#define lock_fpu_owner() pagefault_disable()
+#define unlock_fpu_owner() pagefault_enable()
+#endif
- return err;
-}
+#endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b9d358e05214..07d67309451a 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -20,6 +20,7 @@
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
+#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
@@ -27,25 +28,266 @@
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
-#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include "signal-common.h"
-#define DEBUG_SIG 0
+/*
+ * Horribly complicated - with the bloody RM9000 workarounds enabled
+ * the signal trampoline is moved to the end of the structure so we can

+ * increase the alignment without breaking software compatibility.
+ */
+#if ICACHE_REFILLS_WORKAROUND_WAR == 0
+
+struct sigframe {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_code[2]; /* signal trampoline */
+ struct sigcontext sf_sc;
+ sigset_t sf_mask;
+};
+
+struct rt_sigframe {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_code[2]; /* signal trampoline */
+ struct siginfo rs_info;
+ struct ucontext rs_uc;
+};
+
+#else
+
+struct sigframe {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2];
+ struct sigcontext sf_sc; /* hw context */
+ sigset_t sf_mask;
+ u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
+};
+
+struct rt_sigframe {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2];
+ struct siginfo rs_info;
+ struct ucontext rs_uc;
+ u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
+};
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+#endif
+
+/*
+ * Helper routines
+ */
+static int protected_save_fp_context(struct sigcontext __user *sc)
+{
+ int err;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(1);
+ err = save_fp_context(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &sc->sc_fpregs[0]) |
+ __put_user(0, &sc->sc_fpregs[31]) |
+ __put_user(0, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+static int protected_restore_fp_context(struct sigcontext __user *sc)
+{
+ int err, tmp;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(0);
+ err = restore_fp_context(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &sc->sc_fpregs[0]) |
+ __get_user(tmp, &sc->sc_fpregs[31]) |
+ __get_user(tmp, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int err = 0;
+ int i;
+ unsigned int used_math;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __put_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+
+ used_math = !!used_math();
+ err |= __put_user(used_math, &sc->sc_used_math);
+
+ if (used_math) {
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+ }
+ return err;
+}
+
+int fpcsr_pending(unsigned int __user *fpcsr)
+{
+ int err, sig = 0;
+ unsigned int csr, enabled;
+
+ err = __get_user(csr, fpcsr);
+ enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
+ /*
+ * If the signal handler set some FPU exceptions, clear it and
+ * send SIGFPE.
+ */
+ if (csr & enabled) {
+ csr &= ~enabled;
+ err |= __put_user(csr, fpcsr);
+ sig = SIGFPE;
+ }
+ return err ?: sig;
+}
+
+static int
+check_and_restore_fp_context(struct sigcontext __user *sc)
+{
+ int err, sig;
+
+ err = sig = fpcsr_pending(&sc->sc_fpc_csr);
+ if (err > 0)
+ err = 0;
+ err |= protected_restore_fp_context(sc);
+ return err ?: sig;
+}
+
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ unsigned int used_math;
+ unsigned long treg;
+ int err = 0;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __get_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __get_user(used_math, &sc->sc_used_math);
+ conditional_used_math(used_math);
+
+ if (used_math) {
+ /* restore fpu context if we have used it before */
+ if (!err)
+ err = check_and_restore_fp_context(sc);
+ } else {
+ /* signal handler may have used FPU. Give it up. */
+ lose_fpu(0);
+ }
+
+ return err;
+}
+
+void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size)
+{
+ unsigned long sp;
+
+ /* Default to using normal stack */
+ sp = regs->regs[29];
+
+ /*
+ * FPU emulator may have its own trampoline active just
+ * above the user stack, 16-bytes before the next lowest
+ * 16 byte boundary. Try to avoid trashing it.
+ */
+ sp -= 32;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
+}
+
+int install_sigtramp(unsigned int __user *tramp, unsigned int syscall)
+{
+ int err;
+
+ /*
+ * Set up the return code ...
+ *
+ * li v0, __NR__foo_sigreturn
+ * syscall
+ */
+
+ err = __put_user(0x24020000 + syscall, tramp + 0);
+ err |= __put_user(0x0000000c , tramp + 1);
+ if (ICACHE_REFILLS_WORKAROUND_WAR) {
+ err |= __put_user(0, tramp + 2);
+ err |= __put_user(0, tramp + 3);
+ err |= __put_user(0, tramp + 4);
+ err |= __put_user(0, tramp + 5);
+ err |= __put_user(0, tramp + 6);
+ err |= __put_user(0, tramp + 7);
+ }
+ flush_cache_sigtramp((unsigned long) tramp);
+
+ return err;
+}
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_TRAD_SIGNALS
-save_static_function(sys_sigsuspend);
-__attribute_used__ noinline static int
-_sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
+asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
sigset_t newset;
sigset_t __user *uset;
@@ -68,9 +310,7 @@ _sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
}
#endif
-save_static_function(sys_rt_sigsuspend);
-__attribute_used__ noinline static int
-_sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
+asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
sigset_t newset;
sigset_t __user *unewset;
@@ -89,7 +329,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
- recalc_sigpending();
+ recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
@@ -124,7 +364,7 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
- return -EFAULT;
+ return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
@@ -148,48 +388,12 @@ asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
return do_sigaltstack(uss, uoss, usp);
}
-/*
- * Horribly complicated - with the bloody RM9000 workarounds enabled
- * the signal trampolines is moving to the end of the structure so we can
- * increase the alignment without breaking software compatibility.
- */
#ifdef CONFIG_TRAD_SIGNALS
-struct sigframe {
- u32 sf_ass[4]; /* argument save space for o32 */
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 sf_pad[2];
-#else
- u32 sf_code[2]; /* signal trampoline */
-#endif
- struct sigcontext sf_sc;
- sigset_t sf_mask;
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
-#endif
-};
-#endif
-
-struct rt_sigframe {
- u32 rs_ass[4]; /* argument save space for o32 */
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 rs_pad[2];
-#else
- u32 rs_code[2]; /* signal trampoline */
-#endif
- struct siginfo rs_info;
- struct ucontext rs_uc;
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
-#endif
-};
-
-#ifdef CONFIG_TRAD_SIGNALS
-save_static_function(sys_sigreturn);
-__attribute_used__ noinline static void
-_sys_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe __user *frame;
sigset_t blocked;
+ int sig;
frame = (struct sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -203,8 +407,11 @@ _sys_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(&regs, &frame->sf_sc))
+ sig = restore_sigcontext(&regs, &frame->sf_sc);
+ if (sig < 0)
goto badframe;
+ else if (sig)
+ force_sig(sig, current);
/*
* Don't let your children do this ...
@@ -221,13 +428,12 @@ badframe:
}
#endif /* CONFIG_TRAD_SIGNALS */
-save_static_function(sys_rt_sigreturn);
-__attribute_used__ noinline static void
-_sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
stack_t st;
+ int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -241,8 +447,11 @@ _sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
+ sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
goto badframe;
+ else if (sig)
+ force_sig(sig, current);
if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
goto badframe;
@@ -265,7 +474,7 @@ badframe:
}
#ifdef CONFIG_TRAD_SIGNALS
-int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set)
{
struct sigframe __user *frame;
@@ -275,7 +484,7 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- install_sigtramp(frame->sf_code, __NR_sigreturn);
+ err |= install_sigtramp(frame->sf_code, __NR_sigreturn);
err |= setup_sigcontext(regs, &frame->sf_sc);
err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
@@ -299,12 +508,10 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[31] = (unsigned long) frame->sf_code;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
- frame, regs->cp0_epc, frame->regs[31]);
-#endif
- return 0;
+ frame, regs->cp0_epc, regs->regs[31]);
+ return 0;
give_sigsegv:
force_sigsegv(signr, current);
@@ -312,7 +519,7 @@ give_sigsegv:
}
#endif
-int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
+static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe __user *frame;
@@ -322,7 +529,7 @@ int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- install_sigtramp(frame->rs_code, __NR_rt_sigreturn);
+ err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn);
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
@@ -359,11 +566,10 @@ int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[31] = (unsigned long) frame->rs_code;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
-#endif
+
return 0;
give_sigsegv:
@@ -371,7 +577,15 @@ give_sigsegv:
return -EFAULT;
}
-static inline int handle_signal(unsigned long sig, siginfo_t *info,
+struct mips_abi mips_abi = {
+#ifdef CONFIG_TRAD_SIGNALS
+ .setup_frame = setup_frame,
+#endif
+ .setup_rt_frame = setup_rt_frame,
+ .restart = __NR_restart_syscall
+};
+
+static int handle_signal(unsigned long sig, siginfo_t *info,
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
int ret;
@@ -409,7 +623,7 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
return ret;
}
-void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
{
struct k_sigaction ka;
sigset_t *oldset;
@@ -459,7 +673,7 @@ void do_signal(struct pt_regs *regs)
regs->cp0_epc -= 8;
}
if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
- regs->regs[2] = __NR_restart_syscall;
+ regs->regs[2] = current->thread.abi->restart;
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
@@ -485,5 +699,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
{
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
- current->thread.abi->do_signal(regs);
+ do_signal(regs);
}
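
restore_sigcontext() now returns a tri-state value: negative for an unreadable frame, a positive signal number (SIGFPE from fpcsr_pending()) that must be forced on the task, or zero on plain success. A sketch of a caller consuming that convention; the caller itself is hypothetical and assumes the 2.6-era force_sig() signature plus the declarations from this file:

static void example_consume_restore(struct pt_regs *regs,
                                    struct sigcontext __user *sc)
{
        int sig = restore_sigcontext(regs, sc);

        if (sig < 0) {
                force_sig(SIGSEGV, current);    /* bad user frame */
                return;
        }
        if (sig)
                force_sig(sig, current);        /* deferred SIGFPE */
}

This is the shape of the sys_sigreturn()/sys_rt_sigreturn() changes above, where the negative case jumps to the badframe label instead.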
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index c86a5ddff050..b9a014411f83 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -8,6 +8,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
+#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -21,18 +22,21 @@
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/compiler.h>
+#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
+#include <asm/compat-signal.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/sim.h>
-#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/system.h>
#include <asm/fpu.h>
#include <asm/war.h>
+#include "signal-common.h"
+
#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
typedef struct compat_siginfo {
@@ -100,21 +104,10 @@ typedef struct compat_siginfo {
*/
#define __NR_O32_sigreturn 4119
#define __NR_O32_rt_sigreturn 4193
-#define __NR_O32_restart_syscall 4253
-
-#define DEBUG_SIG 0
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+#define __NR_O32_restart_syscall 4253
/* 32-bit compatibility types */
-#define _NSIG_BPW32 32
-#define _NSIG_WORDS32 (_NSIG / _NSIG_BPW32)
-
-typedef struct {
- unsigned int sig[_NSIG_WORDS32];
-} sigset_t32;
-
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
@@ -136,9 +129,188 @@ struct ucontext32 {
s32 uc_link;
stack32_t uc_stack;
struct sigcontext32 uc_mcontext;
- sigset_t32 uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+/*
+ * Horribly complicated - with the bloody RM9000 workarounds enabled
+ * the signal trampoline is moved to the end of the structure so we can
+ * increase the alignment without breaking software compatibility.
+ */
+#if ICACHE_REFILLS_WORKAROUND_WAR == 0
+
+struct sigframe32 {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_code[2]; /* signal trampoline */
+ struct sigcontext32 sf_sc;
+ compat_sigset_t sf_mask;
+};
+
+struct rt_sigframe32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_code[2]; /* signal trampoline */
+ compat_siginfo_t rs_info;
+ struct ucontext32 rs_uc;
+};
+
+#else /* ICACHE_REFILLS_WORKAROUND_WAR */
+
+struct sigframe32 {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2];
+ struct sigcontext32 sf_sc; /* hw context */
+ compat_sigset_t sf_mask;
+ u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
};
+struct rt_sigframe32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2];
+ compat_siginfo_t rs_info;
+ struct ucontext32 rs_uc;
+ u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */
+};
+
+#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
+
+/*
+ * sigcontext handlers
+ */
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
+{
+ int err;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(1);
+ err = save_fp_context32(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &sc->sc_fpregs[0]) |
+ __put_user(0, &sc->sc_fpregs[31]) |
+ __put_user(0, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+ int err, tmp;
+ while (1) {
+ lock_fpu_owner();
+ own_fpu_inatomic(0);
+ err = restore_fp_context32(sc); /* this might fail */
+ unlock_fpu_owner();
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &sc->sc_fpregs[0]) |
+ __get_user(tmp, &sc->sc_fpregs[31]) |
+ __get_user(tmp, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+ return err;
+}
+
+static int setup_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ int i;
+ u32 used_math;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
+
+ used_math = !!used_math();
+ err |= __put_user(used_math, &sc->sc_used_math);
+
+ if (used_math) {
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context32(sc);
+ }
+ return err;
+}
+
+static int
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+ int err, sig;
+
+ err = sig = fpcsr_pending(&sc->sc_fpc_csr);
+ if (err > 0)
+ err = 0;
+ err |= protected_restore_fp_context32(sc);
+ return err ?: sig;
+}
+
+static int restore_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ u32 used_math;
+ int err = 0;
+ s32 treg;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __get_user(used_math, &sc->sc_used_math);
+ conditional_used_math(used_math);
+
+ if (used_math) {
+ /* restore fpu context if we have used it before */
+ if (!err)
+ err = check_and_restore_fp_context32(sc);
+ } else {
+ /* signal handler may have used FPU. Give it up. */
+ lose_fpu(0);
+ }
+
+ return err;
+}
+
+/*
+ * sigset manipulation helpers
+ */
extern void __put_sigset_unknown_nsig(void);
extern void __get_sigset_unknown_nsig(void);
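
protected_save_fp_context32() and protected_restore_fp_context32() above loop because the FPU context copy runs under the FPU-owner lock and must not sleep on a user-space fault; on failure they touch the sigcontext with __put_user()/__get_user() to fault the page in, then retry. A minimal user-space model of that try-under-a-no-fault-section, pre-fault, retry shape follows; every name in it is a stand-in.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: op_nofault() fails while the target is not
 * "present"; touch_target() simulates faulting the page in.           */
static bool target_present;

static int op_nofault(void)   { return target_present ? 0 : -1; }
static int touch_target(void) { target_present = true; return 0; }

static int protected_op(void)
{
	int err;

	for (;;) {
		/* lock();  -- the real code holds the FPU-owner lock here */
		err = op_nofault();          /* may fail, must not sleep   */
		/* unlock(); */
		if (!err)
			break;
		/* Fault the target in outside the critical section, retry. */
		err = touch_target();
		if (err)
			break;                   /* really bad target, give up */
	}
	return err;
}

int main(void)
{
	printf("result: %d\n", protected_op());
	return 0;
}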
@@ -191,9 +363,7 @@ static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
* Atomically swap in the new signal mask, and wait for a signal.
*/
-save_static_function(sys32_sigsuspend);
-__attribute_used__ noinline static int
-_sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
+asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t __user *uset;
sigset_t newset;
@@ -215,9 +385,7 @@ _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -ERESTARTNOHAND;
}
-save_static_function(sys32_rt_sigsuspend);
-__attribute_used__ noinline static int
-_sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
+asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t __user *uset;
sigset_t newset;
@@ -326,91 +494,6 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
return ret;
}
-static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 __user *sc)
-{
- u32 used_math;
- int err = 0;
- s32 treg;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- err |= __get_user(regs->cp0_epc, &sc->sc_pc);
- err |= __get_user(regs->hi, &sc->sc_mdhi);
- err |= __get_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
- err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
- err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-
-#define restore_gp_reg(i) do { \
- err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
-} while(0)
- restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
- restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
- restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
- restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
- restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
- restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
- restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
- restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
- restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
- restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
- restore_gp_reg(31);
-#undef restore_gp_reg
-
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- preempt_disable();
-
- if (used_math()) {
- /* restore fpu context if we have used it before */
- own_fpu();
- err |= restore_fp_context32(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu();
- }
-
- preempt_enable();
-
- return err;
-}
-
-struct sigframe {
- u32 sf_ass[4]; /* argument save space for o32 */
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 sf_pad[2];
-#else
- u32 sf_code[2]; /* signal trampoline */
-#endif
- struct sigcontext32 sf_sc;
- sigset_t sf_mask;
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
-#endif
-};
-
-struct rt_sigframe32 {
- u32 rs_ass[4]; /* argument save space for o32 */
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 rs_pad[2];
-#else
- u32 rs_code[2]; /* signal trampoline */
-#endif
- compat_siginfo_t rs_info;
- struct ucontext32 rs_uc;
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */
-#endif
-};
-
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err;
@@ -463,17 +546,16 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
return err;
}
-save_static_function(sys32_sigreturn);
-__attribute_used__ noinline static void
-_sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
{
- struct sigframe __user *frame;
+ struct sigframe32 __user *frame;
sigset_t blocked;
+ int sig;
- frame = (struct sigframe __user *) regs.regs[29];
+ frame = (struct sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
- if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
+ if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
goto badframe;
sigdelsetmask(&blocked, ~_BLOCKABLE);
@@ -482,8 +564,11 @@ _sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext32(&regs, &frame->sf_sc))
+ sig = restore_sigcontext32(&regs, &frame->sf_sc);
+ if (sig < 0)
goto badframe;
+ else if (sig)
+ force_sig(sig, current);
/*
* Don't let your children do this ...
@@ -499,20 +584,19 @@ badframe:
force_sig(SIGSEGV, current);
}
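
sys32_sigreturn() now pulls the blocked mask out of the frame with __copy_conv_sigset_from_user() instead of a raw __copy_from_user(), because the frame holds a compat_sigset_t of 32-bit words while the 64-bit kernel works on a native sigset_t. The self-contained sketch below shows the kind of word pairing such a conversion implies; it is an illustration of the idea, not the asm/compat-signal.h implementation, which also has to cope with the big-endian layout that makes a plain byte copy wrong in the first place.

#include <stdint.h>
#include <stdio.h>

#define NSIG_WORDS64 2                            /* native sigset words (assumed) */
#define NSIG_WORDS32 (NSIG_WORDS64 * 2)           /* compat sigset words           */

struct ksigset { uint64_t sig[NSIG_WORDS64]; };   /* models sigset_t        */
struct csigset { uint32_t sig[NSIG_WORDS32]; };   /* models compat_sigset_t */

/* Rebuild each native 64-bit word from a pair of 32-bit compat words. */
static void sigset_from_compat32(struct ksigset *k, const struct csigset *c)
{
	int i;

	for (i = 0; i < NSIG_WORDS64; i++)
		k->sig[i] = (uint64_t)c->sig[2 * i] |
			    ((uint64_t)c->sig[2 * i + 1] << 32);
}

int main(void)
{
	struct csigset c = { { 0x00000004, 0x00000001, 0, 0 } };
	struct ksigset k;

	sigset_from_compat32(&k, &c);
	printf("word0 = %#llx\n", (unsigned long long)k.sig[0]);
	return 0;
}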
-save_static_function(sys32_rt_sigreturn);
-__attribute_used__ noinline static void
-_sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe32 __user *frame;
mm_segment_t old_fs;
sigset_t set;
stack_t st;
s32 sp;
+ int sig;
frame = (struct rt_sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
- if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
@@ -521,8 +605,11 @@ _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext))
+ sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
goto badframe;
+ else if (sig)
+ force_sig(sig, current);
/* The ucontext contains a stack32_t, so we must convert! */
if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
@@ -554,111 +641,21 @@ badframe:
force_sig(SIGSEGV, current);
}
-static inline int setup_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
-
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
- err |= __put_user(regs->cp0_status, &sc->sc_status);
-
-#define save_gp_reg(i) { \
- err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \
-} while(0)
- __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
- save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
- save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
- save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
- save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
- save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
- save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
- save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
- save_gp_reg(31);
-#undef save_gp_reg
-
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- err |= __put_user(mfhi1(), &sc->sc_hi1);
- err |= __put_user(mflo1(), &sc->sc_lo1);
- err |= __put_user(mfhi2(), &sc->sc_hi2);
- err |= __put_user(mflo2(), &sc->sc_lo2);
- err |= __put_user(mfhi3(), &sc->sc_hi3);
- err |= __put_user(mflo3(), &sc->sc_lo3);
- }
-
- err |= __put_user(!!used_math(), &sc->sc_used_math);
-
- if (!used_math())
- goto out;
-
- /*
- * Save FPU state to signal context. Signal handler will "inherit"
- * current FPU state.
- */
- preempt_disable();
-
- if (!is_fpu_owner()) {
- own_fpu();
- restore_fp(current);
- }
- err |= save_fp_context32(sc);
-
- preempt_enable();
-
-out:
- return err;
-}
-
-/*
- * Determine which stack to use..
- */
-static inline void __user *get_sigframe(struct k_sigaction *ka,
- struct pt_regs *regs,
- size_t frame_size)
-{
- unsigned long sp;
-
- /* Default to using normal stack */
- sp = regs->regs[29];
-
- /*
- * FPU emulator may have it's own trampoline active just
- * above the user stack, 16-bytes before the next lowest
- * 16 byte boundary. Try to avoid trashing it.
- */
- sp -= 32;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
- sp = current->sas_ss_sp + current->sas_ss_size;
-
- return (void __user *)((sp - frame_size) & ALMASK);
-}
-
-int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set)
{
- struct sigframe __user *frame;
+ struct sigframe32 __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- /*
- * Set up the return code ...
- *
- * li v0, __NR_O32_sigreturn
- * syscall
- */
- err |= __put_user(0x24020000 + __NR_O32_sigreturn, frame->sf_code + 0);
- err |= __put_user(0x0000000c , frame->sf_code + 1);
- flush_cache_sigtramp((unsigned long) frame->sf_code);
+ err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn);
err |= setup_sigcontext32(regs, &frame->sf_sc);
- err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
+ err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
+
if (err)
goto give_sigsegv;
@@ -679,11 +676,10 @@ int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[31] = (unsigned long) frame->sf_code;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
- frame, regs->cp0_epc, frame->sf_code);
-#endif
+ frame, regs->cp0_epc, regs->regs[31]);
+
return 0;
give_sigsegv:
@@ -691,7 +687,7 @@ give_sigsegv:
return -EFAULT;
}
-int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe32 __user *frame;
@@ -702,17 +698,7 @@ int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- /* Set up to return from userspace. If provided, use a stub already
- in userspace. */
- /*
- * Set up the return code ...
- *
- * li v0, __NR_O32_rt_sigreturn
- * syscall
- */
- err |= __put_user(0x24020000 + __NR_O32_rt_sigreturn, frame->rs_code + 0);
- err |= __put_user(0x0000000c , frame->rs_code + 1);
- flush_cache_sigtramp((unsigned long) frame->rs_code);
+ err |= install_sigtramp(frame->rs_code, __NR_O32_rt_sigreturn);
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
@@ -728,7 +714,7 @@ int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
err |= __put_user(current->sas_ss_size,
&frame->rs_uc.uc_stack.ss_size);
err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
- err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
if (err)
goto give_sigsegv;
@@ -750,11 +736,10 @@ int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[31] = (unsigned long) frame->rs_code;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
- frame, regs->cp0_epc, frame->rs_code);
-#endif
+ frame, regs->cp0_epc, regs->regs[31]);
+
return 0;
give_sigsegv:
@@ -762,110 +747,14 @@ give_sigsegv:
return -EFAULT;
}
-static inline int handle_signal(unsigned long sig, siginfo_t *info,
- struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
-{
- int ret;
-
- switch (regs->regs[0]) {
- case ERESTART_RESTARTBLOCK:
- case ERESTARTNOHAND:
- regs->regs[2] = EINTR;
- break;
- case ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->regs[2] = EINTR;
- break;
- }
- /* fallthrough */
- case ERESTARTNOINTR: /* Userland will reload $v0. */
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
- }
-
- regs->regs[0] = 0; /* Don't deal with this again. */
-
- if (ka->sa.sa_flags & SA_SIGINFO)
- ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info);
- else
- ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);
-
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- return ret;
-}
-
-void do_signal32(struct pt_regs *regs)
-{
- struct k_sigaction ka;
- sigset_t *oldset;
- siginfo_t info;
- int signr;
-
- /*
- * We want the common case to go fast, which is why we may in certain
- * cases get here from kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
- oldset = &current->blocked;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- /* Whee! Actually deliver the signal. */
- if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
- /*
- * A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
-
- return;
- }
-
- /*
- * Who's code doesn't conform to the restartable syscall convention
- * dies here!!! The li instruction, a single machine instruction,
- * must directly be followed by the syscall instruction.
- */
- if (regs->regs[0]) {
- if (regs->regs[2] == ERESTARTNOHAND ||
- regs->regs[2] == ERESTARTSYS ||
- regs->regs[2] == ERESTARTNOINTR) {
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
- }
- if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
- regs->regs[2] = __NR_O32_restart_syscall;
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 4;
- }
- regs->regs[0] = 0; /* Don't deal with this again. */
- }
-
- /*
- * If there's no signal to deliver, we just put the saved sigmask
- * back
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
-}
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+struct mips_abi mips_abi_32 = {
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
+ .restart = __NR_O32_restart_syscall
+};
asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
struct sigaction32 __user *oact,
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index a67c18555ed3..a9202fa95987 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -29,8 +29,10 @@
#include <linux/compat.h>
#include <linux/bitops.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/cacheflush.h>
+#include <asm/compat-signal.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
@@ -47,9 +49,9 @@
#define __NR_N32_rt_sigreturn 6211
#define __NR_N32_restart_syscall 6214
-#define DEBUG_SIG 0
+extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
+extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/* IRIX compatible stack_t */
typedef struct sigaltstack32 {
@@ -63,28 +65,33 @@ struct ucontextn32 {
s32 uc_link;
stack32_t uc_stack;
struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
+#if ICACHE_REFILLS_WORKAROUND_WAR == 0
+
struct rt_sigframe_n32 {
u32 rs_ass[4]; /* argument save space for o32 */
-#if ICACHE_REFILLS_WORKAROUND_WAR
- u32 rs_pad[2];
-#else
u32 rs_code[2]; /* signal trampoline */
-#endif
struct siginfo rs_info;
struct ucontextn32 rs_uc;
-#if ICACHE_REFILLS_WORKAROUND_WAR
+};
+
+#else /* ICACHE_REFILLS_WORKAROUND_WAR */
+
+struct rt_sigframe_n32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2];
+ struct siginfo rs_info;
+ struct ucontextn32 rs_uc;
u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
-#endif
};
+#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
+
extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat);
-save_static_function(sysn32_rt_sigsuspend);
-__attribute_used__ noinline static int
-_sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
+asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t __user *unewset;
compat_sigset_t uset;
@@ -105,7 +112,7 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
- recalc_sigpending();
+ recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
@@ -114,19 +121,18 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -ERESTARTNOHAND;
}
-save_static_function(sysn32_rt_sigreturn);
-__attribute_used__ noinline static void
-_sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe_n32 __user *frame;
sigset_t set;
stack_t st;
s32 sp;
+ int sig;
frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
- if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
@@ -135,8 +141,11 @@ _sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
+ sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
goto badframe;
+ else if (sig)
+ force_sig(sig, current);
/* The ucontext contains a stack32_t, so we must convert! */
if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
@@ -165,7 +174,7 @@ badframe:
force_sig(SIGSEGV, current);
}
-int setup_rt_frame_n32(struct k_sigaction * ka,
+static int setup_rt_frame_n32(struct k_sigaction * ka,
struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe_n32 __user *frame;
@@ -184,7 +193,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
- sp = (int) (long) current->sas_ss_sp;
+ sp = (int) (long) current->sas_ss_sp;
err |= __put_user(sp,
&frame->rs_uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->regs[29]),
@@ -192,7 +201,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
err |= __put_user(current->sas_ss_size,
&frame->rs_uc.uc_stack.ss_size);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
- err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
if (err)
goto give_sigsegv;
@@ -214,14 +223,18 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
regs->regs[31] = (unsigned long) frame->rs_code;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
-#endif
+
return 0;
give_sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
+
+struct mips_abi mips_abi_n32 = {
+ .setup_rt_frame = setup_rt_frame_n32,
+ .restart = __NR_N32_restart_syscall
+};
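
Both compat paths now call install_sigtramp() rather than open-coding the return stub; the deleted lines in setup_frame_32()/setup_rt_frame_32() show what that helper must emit: a two-instruction "li v0, __NR_xxx_sigreturn; syscall" sequence on the user stack followed by an icache flush. Below is a sketch consistent with those deleted encodings (0x24020000 | nr, then 0x0000000c); the put_word()/flush_icache() stubs are placeholders, not kernel interfaces.

#include <stdint.h>
#include <stdio.h>

/* Placeholder user-access and cache stubs for the sketch. */
static int put_word(uint32_t *dst, uint32_t val) { *dst = val; return 0; }
static void flush_icache(const void *addr)       { (void)addr; }

/* Write "li v0, syscall_nr; syscall" at tramp[], as the deleted
 * open-coded versions did, then make it visible to instruction fetch. */
static int install_sigtramp_model(uint32_t *tramp, unsigned int syscall_nr)
{
	int err = 0;

	err |= put_word(&tramp[0], 0x24020000 | syscall_nr); /* addiu v0, zero, nr */
	err |= put_word(&tramp[1], 0x0000000c);              /* syscall            */
	flush_icache(tramp);
	return err;
}

int main(void)
{
	uint32_t code[2];

	install_sigtramp_model(code, 4193);      /* __NR_O32_rt_sigreturn above */
	printf("%#x %#x\n", code[0], code[1]);
	return 0;
}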
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 1ee689c0e0c9..64b62bdfb4f6 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -35,7 +35,6 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
-#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
#define MIPS_CPU_IPI_RESCHED_IRQ 0
#define MIPS_CPU_IPI_CALL_IRQ 1
@@ -108,12 +107,12 @@ void __init sanitize_tlb_entries(void)
static void ipi_resched_dispatch(void)
{
- do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}
static void ipi_call_dispatch(void)
{
- do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ);
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
@@ -270,8 +269,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}
- cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
- cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
+ cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+ cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);
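
The smp-mt.c change is purely about which base the Linux IRQ numbers are derived from: the dispatch stubs and the setup_irq() registrations must add the same per-purpose offsets to the same base, now the generic MIPS_CPU_IRQ_BASE instead of the Malta-specific MIPSCPU_INT_BASE. A tiny model of that invariant, with an assumed base value of 16:

#include <stdio.h>

#define CPU_IRQ_BASE        16          /* assumed platform base            */
#define IPI_RESCHED_OFFSET  0
#define IPI_CALL_OFFSET     1

static void do_irq(int irq) { printf("dispatch irq %d\n", irq); }

/* Both the dispatch path and the registration path must derive the
 * Linux IRQ number from the same base, which is what the patch fixes. */
static void ipi_resched_dispatch(void) { do_irq(CPU_IRQ_BASE + IPI_RESCHED_OFFSET); }
static void ipi_call_dispatch(void)    { do_irq(CPU_IRQ_BASE + IPI_CALL_OFFSET); }

int main(void)
{
	int resched_irq = CPU_IRQ_BASE + IPI_RESCHED_OFFSET;  /* what setup_irq() gets */
	int call_irq    = CPU_IRQ_BASE + IPI_CALL_OFFSET;

	ipi_resched_dispatch();
	ipi_call_dispatch();
	printf("registered %d and %d\n", resched_irq, call_irq);
	return 0;
}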
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 0555fc554f65..c46e479c992b 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -51,31 +51,14 @@ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);
+/* This happens early in bootup, can't really do it better */
static void smp_tune_scheduling (void)
{
struct cache_desc *cd = &current_cpu_data.scache;
- unsigned long cachesize; /* kB */
- unsigned long cpu_khz;
+ unsigned long cachesize = cd->linesz * cd->sets * cd->ways;
- /*
- * Crude estimate until we actually meassure ...
- */
- cpu_khz = loops_per_jiffy * 2 * HZ / 1000;
-
- /*
- * Rough estimation for SMP scheduling, this is the number of
- * cycles it takes for a fully memory-limited process to flush
- * the SMP-local cache.
- *
- * (For a P5 this pretty much means we will choose another idle
- * CPU almost always at wakeup time (this is due to the small
- * L1 cache), on PIIs it's around 50-100 usecs, depending on
- * the cache size)
- */
- if (!cpu_khz)
- return;
-
- cachesize = cd->linesz * cd->sets * cd->ways;
+ if (cachesize > max_cache_size)
+ max_cache_size = cachesize;
}
extern void __init calibrate_delay(void);
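
The reworked smp_tune_scheduling() reduces to one product: secondary-cache capacity is line size times number of sets times associativity, and the largest value seen feeds max_cache_size. A worked example with hypothetical cache parameters:

#include <stdio.h>

struct cache_desc { unsigned int linesz, sets, ways; };

static unsigned long max_cache_size;

/* Total capacity in bytes = line size x sets x ways, the same product
 * the simplified smp_tune_scheduling() feeds into max_cache_size.      */
static void tune(const struct cache_desc *cd)
{
	unsigned long cachesize = (unsigned long)cd->linesz * cd->sets * cd->ways;

	if (cachesize > max_cache_size)
		max_cache_size = cachesize;
}

int main(void)
{
	struct cache_desc scache = { 32, 256, 4 };   /* hypothetical 32 KiB cache */

	tune(&scache);
	printf("max_cache_size = %lu bytes\n", max_cache_size);
	return 0;
}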
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 6a857bf030b0..5dcfab6b288e 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <asm/cpu.h>
@@ -14,6 +15,7 @@
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
+#include <asm/mips-boards/maltaint.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
@@ -26,16 +28,6 @@
* This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
*/
-/*
- * MIPSCPU_INT_BASE is identically defined in both
- * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
- * but as yet there's no properly organized include structure that
- * will ensure that the right *int.h file will be included for a
- * given platform build.
- */
-
-#define MIPSCPU_INT_BASE 16
-
#define MIPS_CPU_IPI_IRQ 1
#define LOCK_MT_PRA() \
@@ -77,15 +69,15 @@ unsigned int ipi_timer_latch[NR_CPUS];
#define IPIBUF_PER_CPU 4
-struct smtc_ipi_q IPIQ[NR_CPUS];
-struct smtc_ipi_q freeIPIq;
+static struct smtc_ipi_q IPIQ[NR_CPUS];
+static struct smtc_ipi_q freeIPIq;
/* Forward declarations */
void ipi_decode(struct smtc_ipi *);
-void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
-void setup_cross_vpe_interrupts(void);
+static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
+static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);
/* Global SMTC Status */
@@ -151,10 +143,7 @@ __setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
-/* Enable additional debug checks before going into CPU idle loop */
-#define SMTC_IDLE_HOOK_DEBUG
-
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
static int hang_trig = 0;
@@ -181,12 +170,15 @@ __setup("tintq=", tintq);
int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
+int vpemask[2][8] = {
+ {0, 0, 1, 0, 0, 0, 0, 1},
+ {0, 0, 0, 0, 0, 0, 0, 1}
+};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Initialize shared TLB - the should probably migrate to smtc_setup_cpus() */
@@ -200,7 +192,7 @@ void __init sanitize_tlb_entries(void)
* Configure shared TLB - VPC configuration bit must be set by caller
*/
-void smtc_configure_tlb(void)
+static void smtc_configure_tlb(void)
{
int i,tlbsiz,vpes;
unsigned long mvpconf0;
@@ -404,10 +396,10 @@ void mipsmt_prepare_cpus(void)
printk("ASID mask value override to 0x%x\n", asidmask);
/* Temporary */
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
if (hang_trig)
printk("Logic Analyser Trigger on suspected TC hang\n");
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Put MVPE's into 'configuration state' */
write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
@@ -514,8 +506,7 @@ void mipsmt_prepare_cpus(void)
/* If we have multiple VPEs running, set up the cross-VPE interrupt */
- if (nvpe > 1)
- setup_cross_vpe_interrupts();
+ setup_cross_vpe_interrupts(nvpe);
/* Set up queue of free IPI "messages". */
nipi = NR_CPUS * IPIBUF_PER_CPU;
@@ -620,7 +611,12 @@ void smtc_cpus_done(void)
int setup_irq_smtc(unsigned int irq, struct irqaction * new,
unsigned long hwmask)
{
+ unsigned int vpe = current_cpu_data.vpe_id;
+
irq_hwmask[irq] = hwmask;
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+ vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1;
+#endif
return setup_irq(irq, new);
}
@@ -648,7 +644,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
* the VPE.
*/
-void smtc_ipi_qdump(void)
+static void smtc_ipi_qdump(void)
{
int i;
@@ -686,28 +682,6 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
return result;
}
-/* No longer used in IPI dispatch, but retained for future recycling */
-
-static __inline__ int atomic_postclear(unsigned int *pv)
-{
- unsigned long result;
-
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ll %0, %2 \n"
- " or %1, $0, $0 \n"
- " sc %1, %2 \n"
- " beqz %1, 1b \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (*pv)
- : "m" (*pv)
- : "memory");
-
- return result;
-}
-
-
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
int tcstatus;
@@ -781,7 +755,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
/*
* Send IPI message to Halted TC, TargTC/TargVPE already having been set
*/
-void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
+static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
struct pt_regs *kstack;
unsigned long tcstatus;
@@ -847,12 +821,15 @@ void ipi_decode(struct smtc_ipi *pipi)
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
+ irq_enter();
+ kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++;
/* Invoke Clock "Interrupt" */
ipi_timer_latch[dest_copy] = 0;
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
clock_hang_reported[dest_copy] = 0;
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
local_timer_interrupt(0, NULL);
+ irq_exit();
break;
case LINUX_SMP_IPI:
switch ((int)arg_copy) {
@@ -921,7 +898,7 @@ void smtc_timer_broadcast(int vpe)
* interrupts.
*/
-static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
+static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
@@ -1000,8 +977,11 @@ static void ipi_irq_dispatch(void)
static struct irqaction irq_ipi;
-void setup_cross_vpe_interrupts(void)
+static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
+ if (nvpe < 1)
+ return;
+
if (!cpu_has_vint)
panic("SMTC Kernel requires Vectored Interupt support");
@@ -1019,10 +999,17 @@ void setup_cross_vpe_interrupts(void)
/*
* SMTC-specific hacks invoked from elsewhere in the kernel.
+ *
+ * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
+ * called with interrupts disabled. We do rely on interrupts being disabled
+ * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
+ * result in a recursive call to raw_local_irq_restore().
*/
-void smtc_ipi_replay(void)
+static void __smtc_ipi_replay(void)
{
+ unsigned int cpu = smp_processor_id();
+
/*
* To the extent that we've ever turned interrupts off,
* we may have accumulated deferred IPIs. This is subtle.
@@ -1037,22 +1024,35 @@ void smtc_ipi_replay(void)
* is clear, and we'll handle it as a real pseudo-interrupt
* and not a pseudo-pseudo interrupt.
*/
- if (IPIQ[smp_processor_id()].depth > 0) {
- struct smtc_ipi *pipi;
- extern void self_ipi(struct smtc_ipi *);
+ if (IPIQ[cpu].depth > 0) {
+ while (1) {
+ struct smtc_ipi_q *q = &IPIQ[cpu];
+ struct smtc_ipi *pipi;
+ extern void self_ipi(struct smtc_ipi *);
+
+ spin_lock(&q->lock);
+ pipi = __smtc_ipi_dq(q);
+ spin_unlock(&q->lock);
+ if (!pipi)
+ break;
- while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
self_ipi(pipi);
- smtc_cpu_stats[smp_processor_id()].selfipis++;
+ smtc_cpu_stats[cpu].selfipis++;
}
}
}
+void smtc_ipi_replay(void)
+{
+ raw_local_irq_disable();
+ __smtc_ipi_replay();
+}
+
EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
{
-#ifdef SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
int im;
int flags;
int mtflags;
@@ -1145,14 +1145,20 @@ void smtc_idle_loop_hook(void)
local_irq_restore(flags);
if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
-#endif /* SMTC_IDLE_HOOK_DEBUG */
+#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/*
* Replay any accumulated deferred IPIs. If "Instant Replay"
* is in use, there should never be any.
*/
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
- smtc_ipi_replay();
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __smtc_ipi_replay();
+ local_irq_restore(flags);
+ }
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
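
The replay code above is split into __smtc_ipi_replay(), which assumes interrupts are already off (it can be reached from raw_local_irq_restore(), so it cannot take an irq-saving lock without recursing), and callers that establish that state themselves: smtc_ipi_replay() disables interrupts first, and the idle hook wraps the call in local_irq_save()/restore(). The sketch below loosely models that internal-helper-assumes-the-precondition split with stand-in primitives.

#include <stdbool.h>
#include <stdio.h>

static bool irqs_off;
static int pending_ipis = 2;

static void irq_disable(void) { irqs_off = true; }
static void irq_enable(void)  { irqs_off = false; }

/* Internal variant: the caller guarantees interrupts are already
 * disabled, mirroring __smtc_ipi_replay().                          */
static void __replay(void)
{
	if (!irqs_off) {
		fprintf(stderr, "BUG: __replay with interrupts on\n");
		return;
	}
	while (pending_ipis > 0) {
		pending_ipis--;
		printf("replayed one deferred IPI\n");
	}
}

/* Public variant establishes the precondition itself. */
static void replay(void)
{
	irq_disable();
	__replay();
	irq_enable();
}

int main(void)
{
	replay();
	return 0;
}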
@@ -1191,7 +1197,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
* It would be nice to be able to use a spinlock here,
* but this is invoked from within TLB flush routines
* that protect themselves with DVPE, so if a lock is
- * held by another TC, it'll never be freed.
+ * held by another TC, it'll never be freed.
*
* DVPE/DMT must not be done with interrupts enabled,
* so even so most callers will already have disabled
@@ -1296,7 +1302,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
* Support for single-threading cache flush operations.
*/
-int halt_state_save[NR_CPUS];
+static int halt_state_save[NR_CPUS];
/*
* To really, really be sure that nothing is being done
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 6c2406a93f2b..93a148486f88 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -669,7 +669,7 @@ asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name,
struct irix_statfs {
short f_type;
- long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
+ long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
char f_fname[6], f_fpack[6];
};
@@ -959,7 +959,7 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin)
fn = default_llseek;
if (file->f_op && file->f_op->llseek)
- fn = file->f_op->llseek;
+ fn = file->f_op->llseek;
lock_kernel();
retval = fn(file, offset, origin);
unlock_kernel();
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 8aa544f73a5e..e5e56bd498db 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -307,7 +307,7 @@ static unsigned int __init calibrate_hpt(void)
struct clocksource clocksource_mips = {
.name = "MIPS",
.mask = 0xffffffff,
- .is_continuous = 1,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init init_mips_clocksource(void)
@@ -455,8 +455,3 @@ EXPORT_SYMBOL(rtc_lock);
EXPORT_SYMBOL(to_tm);
EXPORT_SYMBOL(rtc_mips_set_time);
EXPORT_SYMBOL(rtc_mips_get_time);
-
-unsigned long long sched_clock(void)
-{
- return (unsigned long long)jiffies*(1000000000/HZ);
-}
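
The clocksource hunk swaps a dedicated .is_continuous boolean for a bit in a generic .flags word (CLOCK_SOURCE_IS_CONTINUOUS). The snippet below only illustrates that descriptor-plus-flags pattern with made-up names; it is not the kernel clocksource API.

#include <stdio.h>

#define CLKSRC_CONTINUOUS  (1u << 0)     /* stand-in for CLOCK_SOURCE_IS_CONTINUOUS */
#define CLKSRC_WATCHDOG    (1u << 1)     /* another hypothetical property           */

struct clksrc {
	const char   *name;
	unsigned int  flags;                 /* one word replaces one bool per property */
};

static const struct clksrc mips_like = {
	.name  = "MIPS",
	.flags = CLKSRC_CONTINUOUS,
};

int main(void)
{
	if (mips_like.flags & CLKSRC_CONTINUOUS)
		printf("%s: continuous counter\n", mips_like.name);
	return 0;
}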
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2a932cada244..493cb29b8a42 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -229,6 +229,9 @@ void show_regs(struct pt_regs *regs)
printk("\n");
}
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ printk("Acx : %0*lx\n", field, regs->acx);
+#endif
printk("Hi : %0*lx\n", field, regs->hi);
printk("Lo : %0*lx\n", field, regs->lo);
@@ -340,13 +343,9 @@ NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];
-void __declare_dbe_table(void)
-{
- __asm__ __volatile__(
- ".section\t__dbe_table,\"a\"\n\t"
- ".previous"
- );
-}
+__asm__(
+" .section __dbe_table, \"a\"\n"
+" .previous \n");
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
@@ -611,16 +610,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
- preempt_disable();
-
-#ifdef CONFIG_PREEMPT
- if (!is_fpu_owner()) {
- /* We might lose fpu before disabling preempt... */
- own_fpu();
- BUG_ON(!used_math());
- restore_fp(current);
- }
-#endif
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
@@ -631,18 +620,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
- save_fp(current);
/* Ensure 'resume' not overwrite saved fp context again. */
- lose_fpu();
-
- preempt_enable();
+ lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
- preempt_disable();
-
- own_fpu(); /* Using the FPU again. */
/*
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
@@ -650,9 +633,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
- restore_fp(current);
-
- preempt_enable();
+ own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
if (sig)
@@ -669,7 +650,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
unsigned int opcode, bcode;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
@@ -708,6 +689,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
die_if_kernel("Break instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
+ return;
out_sigsegv:
force_sig(SIGSEGV, current);
@@ -718,7 +700,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
unsigned int opcode, tcode = 0;
siginfo_t info;
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
@@ -751,6 +733,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
die_if_kernel("Trap instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
+ return;
out_sigsegv:
force_sig(SIGSEGV, current);
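
Both do_bp() and do_tr() gain a bare "return;" before the out_sigsegv label. Without it, the normal break/trap path falls straight through into the error label and raises SIGSEGV as well. A small self-contained reproduction of that fall-through bug and the fix:

#include <stdio.h>

static void raise_sig(const char *s) { printf("signal: %s\n", s); }

/* Without the "return" the normal path falls through into the error
 * label and also raises SIGSEGV -- the bug the added "return;" fixes. */
static void handle_break(int bad_frame)
{
	if (bad_frame)
		goto out_sigsegv;

	raise_sig("SIGTRAP");        /* normal breakpoint handling */
	return;                      /* the line the patch adds    */

out_sigsegv:
	raise_sig("SIGSEGV");
}

int main(void)
{
	handle_break(0);
	handle_break(1);
	return 0;
}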
@@ -790,21 +773,15 @@ asmlinkage void do_cpu(struct pt_regs *regs)
break;
case 1:
- preempt_disable();
-
- own_fpu();
- if (used_math()) { /* Using the FPU again. */
- restore_fp(current);
- } else { /* First time FPU user. */
+ if (used_math()) /* Using the FPU again. */
+ own_fpu(1);
+ else { /* First time FPU user. */
init_fpu();
set_used_math();
}
- if (cpu_has_fpu) {
- preempt_enable();
- } else {
+ if (!raw_cpu_has_fpu) {
int sig;
- preempt_enable();
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu, 0);
if (sig)
@@ -845,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 2:
case 3:
- die_if_kernel("do_cpu invoked from kernel context!", regs);
break;
}
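
The do_cpu()/do_fpe() changes move to a simpler lazy-FPU discipline: a task that has used math before re-acquires the hardware context with own_fpu(1), a first-time user gets freshly initialised state, the emulator is entered only when raw_cpu_has_fpu is false, and do_fpe() gives the unit up with lose_fpu(1) before emulating. A rough user-space model of that ownership idea, with hypothetical helpers rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool used_math;        /* has ever touched the FPU            */
	bool owns_fpu;         /* context currently live in hardware  */
	double saved_f0;       /* stand-in for the saved FP context   */
};

/* Re-acquire the FPU: load the previously saved context back. */
static void own_fpu_restore(struct task *t)
{
	t->owns_fpu = true;
	printf("restored f0=%g into hardware\n", t->saved_f0);
}

/* First use: give the task a clean FPU state. */
static void init_fpu_state(struct task *t)
{
	t->saved_f0 = 0.0;
	t->owns_fpu = true;
	t->used_math = true;
}

/* Model of the coprocessor-unusable fault for the FPU. */
static void coprocessor_unusable(struct task *t, bool cpu_has_fpu)
{
	if (t->used_math)
		own_fpu_restore(t);      /* like own_fpu(1) in the patch    */
	else
		init_fpu_state(t);       /* like init_fpu()+set_used_math() */

	if (!cpu_has_fpu)
		printf("no hardware FPU: hand the faulting op to the emulator\n");
}

int main(void)
{
	struct task t = { .used_math = true, .saved_f0 = 3.5 };

	coprocessor_unusable(&t, true);
	return 0;
}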
@@ -1258,26 +1234,26 @@ static inline void mips_srs_init(void)
/*
* This is used by native signal handling
*/
-asmlinkage int (*save_fp_context)(struct sigcontext *sc);
-asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
+asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
+asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
-extern asmlinkage int _save_fp_context(struct sigcontext *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
+extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
+extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
+extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
#ifdef CONFIG_SMP
-static int smp_save_fp_context(struct sigcontext *sc)
+static int smp_save_fp_context(struct sigcontext __user *sc)
{
- return cpu_has_fpu
+ return raw_cpu_has_fpu
? _save_fp_context(sc)
: fpu_emulator_save_context(sc);
}
-static int smp_restore_fp_context(struct sigcontext *sc)
+static int smp_restore_fp_context(struct sigcontext __user *sc)
{
- return cpu_has_fpu
+ return raw_cpu_has_fpu
? _restore_fp_context(sc)
: fpu_emulator_restore_context(sc);
}
@@ -1305,14 +1281,14 @@ static inline void signal_init(void)
/*
* This is used by 32-bit signal stuff on the 64-bit kernel
*/
-asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
-asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
+asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
+asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
+extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
+extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
static inline void signal32_init(void)
{
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 7e7d54823486..24b7b053cfe9 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -515,7 +515,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
- if ((current->thread.mflags & MF_FIXADE) == 0)
+ if (user_mode(regs) && (current->thread.mflags & MF_FIXADE) == 0)
goto sigbus;
/*
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index cecff24cc972..c76b793310c2 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -113,10 +113,12 @@ SECTIONS
references from .rodata */
.exit.text : { *(.exit.text) }
.exit.data : { *(.exit.data) }
+#if defined(CONFIG_BLK_DEV_INITRD)
. = ALIGN(_PAGE_SIZE);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
+#endif
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 458fccf87c54..c9ee9d2d5856 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -29,6 +29,7 @@
*/
#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -48,6 +49,7 @@
#include <asm/cacheflush.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
+#include <asm/mips_mt.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/vpe.h>
@@ -64,6 +66,7 @@ typedef void *vpe_handle;
static char module_name[] = "vpe";
static int major;
+static const int minor = 1; /* fixed for now */
#ifdef CONFIG_MIPS_APSP_KSPD
static struct kspd_notifications kspd_events;
@@ -522,7 +525,7 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
};
static char *rstrs[] = {
- [R_MIPS_NONE] = "MIPS_NONE",
+ [R_MIPS_NONE] = "MIPS_NONE",
[R_MIPS_32] = "MIPS_32",
[R_MIPS_26] = "MIPS_26",
[R_MIPS_HI16] = "MIPS_HI16",
@@ -695,7 +698,7 @@ static void dump_tclist(void)
}
/* We are prepared so configure and start the VPE... */
-int vpe_run(struct vpe * v)
+static int vpe_run(struct vpe * v)
{
struct vpe_notifications *n;
unsigned long val, dmt_flag;
@@ -713,16 +716,16 @@ int vpe_run(struct vpe * v)
dvpe();
if (!list_empty(&v->tc)) {
- if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
- printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
- t->index);
- return -ENOEXEC;
- }
- } else {
- printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
- v->minor);
- return -ENOEXEC;
- }
+ if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
+ printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
+ t->index);
+ return -ENOEXEC;
+ }
+ } else {
+ printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
+ v->minor);
+ return -ENOEXEC;
+ }
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -775,14 +778,14 @@ int vpe_run(struct vpe * v)
back_to_back_c0_hazard();
- /* Set up the XTC bit in vpeconf0 to point at our tc */
- write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
- | (t->index << VPECONF0_XTC_SHIFT));
+ /* Set up the XTC bit in vpeconf0 to point at our tc */
+ write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
+ | (t->index << VPECONF0_XTC_SHIFT));
back_to_back_c0_hazard();
- /* enable this VPE */
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+ /* enable this VPE */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
/* clear out any left overs from a previous program */
write_vpe_c0_status(0);
@@ -832,7 +835,7 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
* contents of the program (p)buffer performing relocatations/etc, free's it
* when finished.
*/
-int vpe_elfload(struct vpe * v)
+static int vpe_elfload(struct vpe * v)
{
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
@@ -1076,6 +1079,7 @@ static int getcwd(char *buff, int size)
static int vpe_open(struct inode *inode, struct file *filp)
{
int minor, ret;
+ enum vpe_state state;
struct vpe *v;
struct vpe_notifications *not;
@@ -1090,7 +1094,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
return -ENODEV;
}
- if (v->state != VPE_STATE_UNUSED) {
+ state = xchg(&v->state, VPE_STATE_INUSE);
+ if (state != VPE_STATE_UNUSED) {
dvpe();
printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
@@ -1105,9 +1110,6 @@ static int vpe_open(struct inode *inode, struct file *filp)
cleanup_tc(get_tc(minor));
}
- // allocate it so when we get write ops we know it's expected.
- v->state = VPE_STATE_INUSE;
-
/* this of-course trashes what was there before... */
v->pbuffer = vmalloc(P_SIZE);
v->plen = P_SIZE;
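
vpe_open() now claims the VPE with xchg(&v->state, VPE_STATE_INUSE) and inspects the previous value, instead of testing the state and only setting it further down, which left a window where two openers could both observe VPE_STATE_UNUSED. A self-contained model of that atomic-claim pattern using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

enum state { UNUSED, INUSE };

static _Atomic int vpe_state = UNUSED;

/* Claim the VPE by atomically swapping in INUSE and looking at what was
 * there before, instead of a racy "check, then set later" sequence.     */
static int vpe_open_model(void)
{
	int old = atomic_exchange(&vpe_state, INUSE);

	if (old != UNUSED) {
		printf("already in use: clean up the previous program first\n");
		/* the real code dumps regs and cleans up the TC here */
	}
	return 0;
}

int main(void)
{
	vpe_open_model();
	vpe_open_model();        /* second open sees INUSE */
	return 0;
}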
@@ -1205,7 +1207,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
return ret;
}
-static struct file_operations vpe_fops = {
+static const struct file_operations vpe_fops = {
.owner = THIS_MODULE,
.open = vpe_open,
.release = vpe_release,
@@ -1365,12 +1367,15 @@ static void kspd_sp_exit( int sp_id)
}
#endif
+static struct device *vpe_dev;
+
static int __init vpe_module_init(void)
{
struct vpe *v = NULL;
+ struct device *dev;
struct tc *t;
unsigned long val;
- int i;
+ int i, err;
if (!cpu_has_mipsmt) {
printk("VPE loader: not a MIPS MT capable processor\n");
@@ -1383,6 +1388,14 @@ static int __init vpe_module_init(void)
return major;
}
+ dev = device_create(mt_class, NULL, MKDEV(major, minor),
+ "tc%d", minor);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
+ vpe_dev = dev;
+
dmt();
dvpe();
@@ -1478,6 +1491,11 @@ static int __init vpe_module_init(void)
kspd_events.kspd_sp_exit = kspd_sp_exit;
#endif
return 0;
+
+out_chrdev:
+ unregister_chrdev(major, module_name);
+
+ return err;
}
static void __exit vpe_module_exit(void)
@@ -1490,6 +1508,7 @@ static void __exit vpe_module_exit(void)
}
}
+ device_destroy(mt_class, MKDEV(major, minor));
unregister_chrdev(major, module_name);
}