author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 16:23:15 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 16:23:15 +0200
commit		faafcba3b5e15999cf75d5c5a513ac8e47e2545f (patch)
tree		47d58d1c00e650e820506c91eb9a41268756bdda /arch
parent		13ead805c5a14b0e7ecd34f61404a5bfba655895 (diff)
parent		f10e00f4bf360c36edbe6bf18a6c75b171cbe012 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Optimized support for Intel "Cluster-on-Die" (CoD) topologies (Dave
     Hansen)

   - Various sched/idle refinements for better idle handling (Nicolas
     Pitre, Daniel Lezcano, Chuansheng Liu, Vincent Guittot)

   - sched/numa updates and optimizations (Rik van Riel)

   - sysbench speedup (Vincent Guittot)

   - capacity calculation cleanups/refactoring (Vincent Guittot)

   - Various cleanups to thread group iteration (Oleg Nesterov)

   - Double-rq-lock removal optimization and various refactorings
     (Kirill Tkhai)

   - various sched/deadline fixes

  ... and lots of other changes"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (72 commits)
  sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
  sched/fair: Delete resched_cpu() from idle_balance()
  sched, time: Fix build error with 64 bit cputime_t on 32 bit systems
  sched: Improve sysbench performance by fixing spurious active migration
  sched/x86: Fix up typo in topology detection
  x86, sched: Add new topology for multi-NUMA-node CPUs
  sched/rt: Use resched_curr() in task_tick_rt()
  sched: Use rq->rd in sched_setaffinity() under RCU read lock
  sched: cleanup: Rename 'out_unlock' to 'out_free_new_mask'
  sched: Use dl_bw_of() under RCU read lock
  sched/fair: Remove duplicate code from can_migrate_task()
  sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW
  sched: print_rq(): Don't use tasklist_lock
  sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
  sched: Fix the task-group check in tg_has_rt_tasks()
  sched/fair: Leverage the idle state info when choosing the "idlest" cpu
  sched: Let the scheduler see CPU idle states
  sched/deadline: Fix inter- exclusive cpusets migrations
  sched/deadline: Clear dl_entity params when setscheduling to different class
  sched/numa: Kill the wrong/dead TASK_DEAD check in task_numa_fault()
  ...
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/topology.c			 4
-rw-r--r--	arch/cris/arch-v10/drivers/sync_serial.c	 1
-rw-r--r--	arch/cris/arch-v32/drivers/sync_serial.c	 1
-rw-r--r--	arch/ia64/include/asm/processor.h		 1
-rw-r--r--	arch/mips/include/asm/processor.h		 6
-rw-r--r--	arch/powerpc/include/asm/cputime.h		 2
-rw-r--r--	arch/powerpc/mm/fault.c				 5
-rw-r--r--	arch/s390/include/asm/cputime.h			 2
-rw-r--r--	arch/um/drivers/random.c			 1
-rw-r--r--	arch/x86/kernel/smpboot.c			55
-rw-r--r--	arch/x86/mm/fault.c				 5
11 files changed, 54 insertions, 29 deletions
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index e35d880f9773..89cfdd6e50cb 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,7 +42,7 @@
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale);
-unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
return per_cpu(cpu_scale, cpu);
}
@@ -166,7 +166,7 @@ static void update_cpu_capacity(unsigned int cpu)
set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n",
- cpu, arch_scale_freq_capacity(NULL, cpu));
+ cpu, arch_scale_cpu_capacity(NULL, cpu));
}
#else
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index 29eb02ab3f25..0f3983241e60 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -1086,7 +1086,6 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
}
local_irq_restore(flags);
schedule();
- set_current_state(TASK_RUNNING);
remove_wait_queue(&port->out_wait_q, &wait);
if (signal_pending(current))
return -EINTR;
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index bbb806b68838..5a149134cfb5 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1089,7 +1089,6 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
}
schedule();
- set_current_state(TASK_RUNNING);
remove_wait_queue(&port->out_wait_q, &wait);
if (signal_pending(current))
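Both sync_serial hunks (and the um/drivers/random.c hunk below) drop a
set_current_state(TASK_RUNNING) that immediately follows schedule(): the
scheduler only returns control to a sleeping task after its waker has already
put it back in TASK_RUNNING, so the call is a no-op. A minimal sketch of the
pattern in question, with illustrative names (some_wait_q and condition are
placeholders, not from this diff):

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&some_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!condition)
		schedule();			/* returns in TASK_RUNNING */
	set_current_state(TASK_RUNNING);	/* redundant; this is the line removed */
	remove_wait_queue(&some_wait_q, &wait);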
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index c7367130ab14..ce53c50d0ba4 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -19,7 +19,6 @@
#include <asm/ptrace.h>
#include <asm/ustack.h>
-#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK
#define IA64_NUM_PHYS_STACK_REG 96
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 05f08438a7c4..f1df4cb4a286 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -397,12 +397,6 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
-/*
- * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
#endif
#endif /* _ASM_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 607559ab271f..6c840ceab820 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -32,6 +32,8 @@ static inline void setup_cputime_one_jiffy(void) { }
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
#ifdef __KERNEL__
/*
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 24b3f4949df4..08d659a9fcdb 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -30,7 +30,6 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
-#include <linux/magic.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
@@ -521,7 +520,6 @@ bail:
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
- unsigned long *stackend;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
@@ -550,8 +548,7 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
- stackend = end_of_stack(current);
- if (current != &init_task && *stackend != STACK_END_MAGIC)
+ if (task_stack_end_corrupted(current))
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
die("Kernel access of bad area", regs, sig);
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f65bd3634519..3001887f94b7 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -18,6 +18,8 @@
typedef unsigned long long __nocast cputime_t;
typedef unsigned long long __nocast cputime64_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
#ifndef CONFIG_64BIT
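These two cputime.h hunks give powerpc and s390 a cmpxchg_cputime() for the
core cputime accounting code: 64-bit powerpc can use plain cmpxchg(), while
s390's unsigned long long cputime_t needs cmpxchg64() to stay atomic on 31-bit
builds. A sketch of the kind of consumer this enables, roughly the lock-free
monotonic advance needed by the "sched, time: Fix build error with 64 bit
cputime_t on 32 bit systems" commit in this pull (details assumed, not part of
this diff):

	static void cputime_advance(cputime_t *counter, cputime_t new)
	{
		cputime_t old;

		/* Raise *counter to 'new' without taking a lock. */
		while (new > (old = ACCESS_ONCE(*counter)))
			cmpxchg_cputime(counter, old, new);
	}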
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 9e3a72205827..dd16c902ff70 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -79,7 +79,6 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
set_task_state(current, TASK_INTERRUPTIBLE);
schedule();
- set_task_state(current, TASK_RUNNING);
remove_wait_queue(&host_read_wait, &wait);
if (atomic_dec_and_test(&host_sleep_count)) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42a2dca984b3..9b1c0f8f68e6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -296,11 +296,19 @@ void smp_store_cpu_info(int id)
}
static bool
+topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+ int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+
+ return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
+}
+
+static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
- return !WARN_ONCE(cpu_to_node(cpu1) != cpu_to_node(cpu2),
+ return !WARN_ONCE(!topology_same_node(c, o),
"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
"[node: %d != %d]. Ignoring dependency.\n",
cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
@@ -341,17 +349,44 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
-static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+/*
+ * Unlike the other levels, we do not enforce keeping a
+ * multicore group inside a NUMA node. If this happens, we will
+ * discard the MC level of the topology later.
+ */
+static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
- if (c->phys_proc_id == o->phys_proc_id) {
- if (cpu_has(c, X86_FEATURE_AMD_DCM))
- return true;
-
- return topology_sane(c, o, "mc");
- }
+ if (c->phys_proc_id == o->phys_proc_id)
+ return true;
return false;
}
+static struct sched_domain_topology_level numa_inside_package_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+ { NULL, },
+};
+/*
+ * set_sched_topology() sets the topology internal to a CPU. The
+ * NUMA topologies are layered on top of it to build the full
+ * system topology.
+ *
+ * If NUMA nodes are observed to occur within a CPU package, this
+ * function should be called. It forces the sched domain code to
+ * only use the SMT level for the CPU portion of the topology.
+ * This essentially falls back to relying on NUMA information
+ * from the SRAT table to describe the entire system topology
+ * (except for hyperthreads).
+ */
+static void primarily_use_numa_for_topology(void)
+{
+ set_sched_topology(numa_inside_package_topology);
+}
+
void set_cpu_sibling_map(int cpu)
{
bool has_smt = smp_num_siblings > 1;
@@ -388,7 +423,7 @@ void set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);
- if ((i == cpu) || (has_mp && match_mc(c, o))) {
+ if ((i == cpu) || (has_mp && match_die(c, o))) {
link_mask(core, cpu, i);
/*
@@ -410,6 +445,8 @@ void set_cpu_sibling_map(int cpu)
} else if (i != cpu && !c->booted_cores)
c->booted_cores = cpu_data(i).booted_cores;
}
+ if (match_die(c, o) && !topology_same_node(c, o))
+ primarily_use_numa_for_topology();
}
}
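The numa_inside_package_topology[] table added above deliberately stops at the
MC level. For contrast, a sketch of the default table it overrides, as it
appears in kernel/sched/core.c of this era (quoted from memory; the
package-wide DIE entry is the part being dropped):

	static struct sched_domain_topology_level default_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	#endif
	#ifdef CONFIG_SCHED_MC
		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	#endif
		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
		{ NULL, },
	};

With the DIE entry gone, everything above the core level is described by the
NUMA levels the scheduler builds from SRAT, which is what gives
Cluster-on-Die machines sane sched domains.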
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 83bb03bfa259..9c5b32e2bdc0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -3,7 +3,6 @@
* Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
* Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
*/
-#include <linux/magic.h> /* STACK_END_MAGIC */
#include <linux/sched.h> /* test_thread_flag(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/module.h> /* search_exception_table */
@@ -649,7 +648,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int signal, int si_code)
{
struct task_struct *tsk = current;
- unsigned long *stackend;
unsigned long flags;
int sig;
@@ -709,8 +707,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
show_fault_oops(regs, error_code, address);
- stackend = end_of_stack(tsk);
- if (tsk != &init_task && *stackend != STACK_END_MAGIC)
+ if (task_stack_end_corrupted(tsk))
printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
tsk->thread.cr2 = address;