Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/cpuset.c  | 13
 -rw-r--r--  kernel/module.c  | 11
 -rw-r--r--  kernel/sched.c   | 70
 -rw-r--r--  kernel/softirq.c |  2
 -rw-r--r--  kernel/timer.c   |  9
 5 files changed, 66 insertions(+), 39 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 407b5f0a8c8e..79866bc6b3a1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -180,6 +180,8 @@ static struct super_block *cpuset_sb = NULL;
*/
static DECLARE_MUTEX(cpuset_sem);
+static struct task_struct *cpuset_sem_owner;
+static int cpuset_sem_depth;
/*
* The global cpuset semaphore cpuset_sem can be needed by the
@@ -200,16 +202,19 @@ static DECLARE_MUTEX(cpuset_sem);
static inline void cpuset_down(struct semaphore *psem)
{
- if (current->cpuset_sem_nest_depth == 0)
+ if (cpuset_sem_owner != current) {
down(psem);
- current->cpuset_sem_nest_depth++;
+ cpuset_sem_owner = current;
+ }
+ cpuset_sem_depth++;
}
static inline void cpuset_up(struct semaphore *psem)
{
- current->cpuset_sem_nest_depth--;
- if (current->cpuset_sem_nest_depth == 0)
+ if (--cpuset_sem_depth == 0) {
+ cpuset_sem_owner = NULL;
up(psem);
+ }
}
/*
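[Editor's note: the hunk above replaces the per-task nest counter with a global owner/depth pair, turning cpuset_sem into a self-recursive lock. The unlocked owner test is safe because only the task that already holds the semaphore can ever observe cpuset_sem_owner == current; every other task reads a value that is not itself and falls through to down(). A minimal userspace analogue of the same pattern, built on pthreads (a sketch under the same benign-race assumption, not kernel code):]

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sem = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t owner;      /* valid only while have_owner != 0 */
    static int have_owner;       /* cleared before the final unlock  */
    static int depth;            /* touched only by the holder       */

    static void rec_down(void)
    {
        /* Unlocked test, mirroring the kernel trick: a non-holder may
         * read stale owner/have_owner, but can never mistake itself
         * for the holder, so at worst it takes the slow down() path. */
        if (!(have_owner && pthread_equal(owner, pthread_self()))) {
            pthread_mutex_lock(&sem);
            owner = pthread_self();
            have_owner = 1;
        }
        depth++;
    }

    static void rec_up(void)
    {
        if (--depth == 0) {
            have_owner = 0;
            pthread_mutex_unlock(&sem);
        }
    }

    int main(void)
    {
        rec_down();
        rec_down();              /* nested acquire does not deadlock */
        printf("depth=%d\n", depth);
        rec_up();
        rec_up();
        return 0;
    }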
diff --git a/kernel/module.c b/kernel/module.c
index 4b39d3793c72..ff5c500ab625 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
@@ -498,7 +499,7 @@ static inline int try_force(unsigned int flags)
{
int ret = (flags & O_TRUNC);
if (ret)
- tainted |= TAINT_FORCED_MODULE;
+ add_taint(TAINT_FORCED_MODULE);
return ret;
}
#else
@@ -897,7 +898,7 @@ static int check_version(Elf_Shdr *sechdrs,
if (!(tainted & TAINT_FORCED_MODULE)) {
printk("%s: no version for \"%s\" found: kernel tainted.\n",
mod->name, symname);
- tainted |= TAINT_FORCED_MODULE;
+ add_taint(TAINT_FORCED_MODULE);
}
return 1;
}
@@ -1352,7 +1353,7 @@ static void set_license(struct module *mod, const char *license)
if (!mod->license_gplok && !(tainted & TAINT_PROPRIETARY_MODULE)) {
printk(KERN_WARNING "%s: module license '%s' taints kernel.\n",
mod->name, license);
- tainted |= TAINT_PROPRIETARY_MODULE;
+ add_taint(TAINT_PROPRIETARY_MODULE);
}
}
@@ -1610,7 +1611,7 @@ static struct module *load_module(void __user *umod,
modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
/* This is allowed: modprobe --force will invalidate it. */
if (!modmagic) {
- tainted |= TAINT_FORCED_MODULE;
+ add_taint(TAINT_FORCED_MODULE);
printk(KERN_WARNING "%s: no version magic, tainting kernel.\n",
mod->name);
} else if (!same_magic(modmagic, vermagic)) {
@@ -1739,7 +1740,7 @@ static struct module *load_module(void __user *umod,
(mod->num_gpl_syms && !gplcrcindex)) {
printk(KERN_WARNING "%s: No versions for exported symbols."
" Tainting kernel.\n", mod->name);
- tainted |= TAINT_FORCED_MODULE;
+ add_taint(TAINT_FORCED_MODULE);
}
#endif
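[Editor's note: every open-coded "tainted |= FLAG" in module.c becomes a call to add_taint(), which is why the hunk at the top adds #include <linux/kernel.h>, where the declaration lives. A minimal definition consistent with that declaration might be the following (a sketch of the helper being called, not a quotation of kernel/panic.c); the value of the change is that every taint now funnels through one function, giving a single place to add locking or debug hooks later:]

    /* Centralized taint helper: sets a taint flag in the global mask. */
    void add_taint(unsigned flag)
    {
        tainted |= flag;
    }
    EXPORT_SYMBOL(add_taint);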
diff --git a/kernel/sched.c b/kernel/sched.c
index e9ff04a9b56d..81b3a96ed2d0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3577,32 +3577,6 @@ task_t *idle_task(int cpu)
}
/**
- * curr_task - return the current task for a given cpu.
- * @cpu: the processor in question.
- */
-task_t *curr_task(int cpu)
-{
- return cpu_curr(cpu);
-}
-
-/**
- * set_curr_task - set the current task for a given cpu.
- * @cpu: the processor in question.
- * @p: the task pointer to set.
- *
- * Description: This function must only be used when non-maskable interrupts
- * are serviced on a separate stack. It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner. This function
- * must be called with interrupts disabled, the caller must save the original
- * value of the current task (see curr_task() above) and restore that value
- * before reenabling interrupts.
- */
-void set_curr_task(int cpu, task_t *p)
-{
- cpu_curr(cpu) = p;
-}
-
-/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
*/
@@ -5628,3 +5602,47 @@ void normalize_rt_tasks(void)
}
#endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_IA64
+/*
+ * These functions are only useful for the IA64 MCA handling.
+ *
+ * They can only be called when the whole system has been
+ * stopped - every CPU needs to be quiescent, and no scheduling
+ * activity can take place. Using them for anything else would
+ * be a serious bug, and as a result, they aren't even visible
+ * under any other configuration.
+ */
+
+/**
+ * curr_task - return the current task for a given cpu.
+ * @cpu: the processor in question.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+task_t *curr_task(int cpu)
+{
+ return cpu_curr(cpu);
+}
+
+/**
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner. This function
+ * must be called with all CPUs synchronized and interrupts disabled; the
+ * caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before re-enabling interrupts
+ * and restarting the system.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+void set_curr_task(int cpu, task_t *p)
+{
+ cpu_curr(cpu) = p;
+}
+
+#endif
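[Editor's note: the expected calling pattern for the pair, per the comment above, is save/switch/restore with the whole machine quiescent. A hypothetical caller sketch (mca_borrow_cpu is an illustrative name, not from the patch):]

    void mca_borrow_cpu(int cpu, task_t *mca_task)
    {
        task_t *saved;
        unsigned long flags;

        local_irq_save(flags);         /* interrupts off, system stopped */
        saved = curr_task(cpu);        /* remember the real current task */
        set_curr_task(cpu, mca_task);  /* switch 'current' without scheduling */

        /* ... run the machine-check handler on its own stack ... */

        set_curr_task(cpu, saved);     /* restore before irqs come back */
        local_irq_restore(flags);
    }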
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b4ab6af1dea8..f766b2fc48be 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -84,7 +84,7 @@ asmlinkage void __do_softirq(void)
cpu = smp_processor_id();
restart:
/* Reset the pending bitmask before enabling irqs */
- local_softirq_pending() = 0;
+ set_softirq_pending(0);
local_irq_enable();
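[Editor's note: the old line assigned through local_softirq_pending() as an lvalue; the accessor makes the write explicit, so an architecture can redefine how the per-cpu pending mask is stored without touching callers. A plausible generic fallback definition (an assumption about the irq_cpustat headers of this era):]

    #ifndef __ARCH_SET_SOFTIRQ_PENDING
    /* Default: write/or into the same per-cpu word the reader uses. */
    #define set_softirq_pending(x)  (local_softirq_pending() = (x))
    #define or_softirq_pending(x)   (local_softirq_pending() |= (x))
    #endif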
diff --git a/kernel/timer.c b/kernel/timer.c
index f4152fcd9f8e..3ba10fa35b60 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1151,19 +1151,22 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
out:
return timeout < 0 ? 0 : timeout;
}
-
EXPORT_SYMBOL(schedule_timeout);
+/*
+ * We can use __set_current_state() here because schedule_timeout() calls
+ * schedule() unconditionally.
+ */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
- set_current_state(TASK_INTERRUPTIBLE);
+ __set_current_state(TASK_INTERRUPTIBLE);
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);
signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
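[Editor's note: the only difference between the two state-setters is a memory barrier. set_current_state() orders the state write against a subsequent condition check, which matters for the classic prepare-to-wait pattern (set state, test condition, then schedule()). These helpers never test a condition: they call schedule_timeout(), and through it schedule(), unconditionally, so the cheaper plain store suffices. A sketch of the definitions being relied on, as they stood in <linux/sched.h> around this time (treat as an assumption):]

    #define __set_current_state(state_value) \
            do { current->state = (state_value); } while (0)
    #define set_current_state(state_value) \
            set_mb(current->state, (state_value))

[And a hypothetical caller of the new helper:]

    /* Sleep for roughly one second, waking early on a signal. */
    signed long remaining = schedule_timeout_interruptible(HZ);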