Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c           |    2
-rw-r--r--  kernel/cpu.c              |    6
-rw-r--r--  kernel/cpuset.c           |  100
-rw-r--r--  kernel/irq/chip.c         |    2
-rw-r--r--  kernel/kmod.c             |    2
-rw-r--r--  kernel/kthread.c          |    1
-rw-r--r--  kernel/latencytop.c       |   27
-rw-r--r--  kernel/rcupreempt.c       |    4
-rw-r--r--  kernel/rcutorture.c       |   15
-rw-r--r--  kernel/sched.c            | 1912
-rw-r--r--  kernel/sched_debug.c      |   36
-rw-r--r--  kernel/sched_fair.c       |  580
-rw-r--r--  kernel/sched_features.h   |   10
-rw-r--r--  kernel/sched_rt.c         |  227
-rw-r--r--  kernel/sched_stats.h      |    8
-rw-r--r--  kernel/softirq.c          |   63
-rw-r--r--  kernel/stop_machine.c     |    2
-rw-r--r--  kernel/sysctl.c           |   15
-rw-r--r--  kernel/time/tick-sched.c  |    5
-rw-r--r--  kernel/user.c             |   30
20 files changed, 2357 insertions, 690 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 9c48abfcd4a5..e1ef04870c2a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -445,7 +445,7 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
if (retval)
return retval;
- return sched_setaffinity(pid, new_mask);
+ return sched_setaffinity(pid, &new_mask);
}
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
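
The compat and native wrappers now hand the cpumask to sched_setaffinity() by pointer; the user-visible syscall ABI is untouched. As a rough userspace illustration of that unchanged ABI (not part of the patch; glibc wrappers assumed):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);		/* ask to run on CPU 0 only */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");

	if (sched_getaffinity(0, sizeof(set), &set) == 0)
		printf("allowed CPUs: %d\n", CPU_COUNT(&set));

	return 0;
}
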
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2eff3f63abed..2011ad8d2697 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -232,9 +232,9 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
/* Ensure that we are not runnable on dying cpu */
old_allowed = current->cpus_allowed;
- tmp = CPU_MASK_ALL;
+ cpus_setall(tmp);
cpu_clear(cpu, tmp);
- set_cpus_allowed(current, tmp);
+ set_cpus_allowed_ptr(current, &tmp);
p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
@@ -268,7 +268,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
out_thread:
err = kthread_stop(p);
out_allowed:
- set_cpus_allowed(current, old_allowed);
+ set_cpus_allowed_ptr(current, &old_allowed);
out_release:
cpu_hotplug_done();
return err;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a1b61f414228..8b35fbd8292f 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -98,6 +98,9 @@ struct cpuset {
/* partition number for rebuild_sched_domains() */
int pn;
+ /* for custom sched domain */
+ int relax_domain_level;
+
/* used for walking a cpuset hierarchy */
struct list_head stack_list;
};
@@ -478,6 +481,16 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
}
+static void
+update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+ if (!dattr)
+ return;
+ if (dattr->relax_domain_level < c->relax_domain_level)
+ dattr->relax_domain_level = c->relax_domain_level;
+ return;
+}
+
/*
* rebuild_sched_domains()
*
@@ -553,12 +566,14 @@ static void rebuild_sched_domains(void)
int csn; /* how many cpuset ptrs in csa so far */
int i, j, k; /* indices for partition finding loops */
cpumask_t *doms; /* resulting partition; i.e. sched domains */
+ struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms; /* number of sched domains in result */
int nslot; /* next empty doms[] cpumask_t slot */
q = NULL;
csa = NULL;
doms = NULL;
+ dattr = NULL;
/* Special case for the 99% of systems with one, full, sched domain */
if (is_sched_load_balance(&top_cpuset)) {
@@ -566,6 +581,11 @@ static void rebuild_sched_domains(void)
doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
if (!doms)
goto rebuild;
+ dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
+ if (dattr) {
+ *dattr = SD_ATTR_INIT;
+ update_domain_attr(dattr, &top_cpuset);
+ }
*doms = top_cpuset.cpus_allowed;
goto rebuild;
}
@@ -622,6 +642,7 @@ restart:
doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
if (!doms)
goto rebuild;
+ dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
for (nslot = 0, i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
@@ -644,12 +665,15 @@ restart:
}
cpus_clear(*dp);
+ if (dattr)
+ *(dattr + nslot) = SD_ATTR_INIT;
for (j = i; j < csn; j++) {
struct cpuset *b = csa[j];
if (apn == b->pn) {
cpus_or(*dp, *dp, b->cpus_allowed);
b->pn = -1;
+ update_domain_attr(dattr, b);
}
}
nslot++;
@@ -660,7 +684,7 @@ restart:
rebuild:
/* Have scheduler rebuild sched domains */
get_online_cpus();
- partition_sched_domains(ndoms, doms);
+ partition_sched_domains(ndoms, doms, dattr);
put_online_cpus();
done:
@@ -668,6 +692,7 @@ done:
kfifo_free(q);
kfree(csa);
/* Don't kfree(doms) -- partition_sched_domains() does that. */
+ /* Don't kfree(dattr) -- partition_sched_domains() does that. */
}
static inline int started_after_time(struct task_struct *t1,
@@ -729,7 +754,7 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
*/
void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
{
- set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
+ set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
}
/**
@@ -1011,6 +1036,21 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
return 0;
}
+static int update_relax_domain_level(struct cpuset *cs, char *buf)
+{
+ int val = simple_strtol(buf, NULL, 10);
+
+ if (val < 0)
+ val = -1;
+
+ if (val != cs->relax_domain_level) {
+ cs->relax_domain_level = val;
+ rebuild_sched_domains();
+ }
+
+ return 0;
+}
+
/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
@@ -1178,7 +1218,7 @@ static void cpuset_attach(struct cgroup_subsys *ss,
mutex_lock(&callback_mutex);
guarantee_online_cpus(cs, &cpus);
- set_cpus_allowed(tsk, cpus);
+ set_cpus_allowed_ptr(tsk, &cpus);
mutex_unlock(&callback_mutex);
from = oldcs->mems_allowed;
@@ -1202,6 +1242,7 @@ typedef enum {
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_SCHED_LOAD_BALANCE,
+ FILE_SCHED_RELAX_DOMAIN_LEVEL,
FILE_MEMORY_PRESSURE_ENABLED,
FILE_MEMORY_PRESSURE,
FILE_SPREAD_PAGE,
@@ -1256,6 +1297,9 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
case FILE_SCHED_LOAD_BALANCE:
retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
break;
+ case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+ retval = update_relax_domain_level(cs, buffer);
+ break;
case FILE_MEMORY_MIGRATE:
retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
break;
@@ -1354,6 +1398,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
case FILE_SCHED_LOAD_BALANCE:
*s++ = is_sched_load_balance(cs) ? '1' : '0';
break;
+ case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+ s += sprintf(s, "%d", cs->relax_domain_level);
+ break;
case FILE_MEMORY_MIGRATE:
*s++ = is_memory_migrate(cs) ? '1' : '0';
break;
@@ -1424,6 +1471,13 @@ static struct cftype cft_sched_load_balance = {
.private = FILE_SCHED_LOAD_BALANCE,
};
+static struct cftype cft_sched_relax_domain_level = {
+ .name = "sched_relax_domain_level",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
+ .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+};
+
static struct cftype cft_memory_migrate = {
.name = "memory_migrate",
.read = cpuset_common_file_read,
@@ -1475,6 +1529,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
return err;
+ if ((err = cgroup_add_file(cont, ss,
+ &cft_sched_relax_domain_level)) < 0)
+ return err;
if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
@@ -1555,10 +1612,11 @@ static struct cgroup_subsys_state *cpuset_create(
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
- cs->cpus_allowed = CPU_MASK_NONE;
- cs->mems_allowed = NODE_MASK_NONE;
+ cpus_clear(cs->cpus_allowed);
+ nodes_clear(cs->mems_allowed);
cs->mems_generation = cpuset_mems_generation++;
fmeter_init(&cs->fmeter);
+ cs->relax_domain_level = -1;
cs->parent = parent;
number_of_cpusets++;
@@ -1625,12 +1683,13 @@ int __init cpuset_init(void)
{
int err = 0;
- top_cpuset.cpus_allowed = CPU_MASK_ALL;
- top_cpuset.mems_allowed = NODE_MASK_ALL;
+ cpus_setall(top_cpuset.cpus_allowed);
+ nodes_setall(top_cpuset.mems_allowed);
fmeter_init(&top_cpuset.fmeter);
top_cpuset.mems_generation = cpuset_mems_generation++;
set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
+ top_cpuset.relax_domain_level = -1;
err = register_filesystem(&cpuset_fs_type);
if (err < 0)
@@ -1844,6 +1903,7 @@ void __init cpuset_init_smp(void)
* cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
* @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
*
* Description: Returns the cpumask_t cpus_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
@@ -1851,35 +1911,27 @@ void __init cpuset_init_smp(void)
* tasks cpuset.
**/
-cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
+void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
{
- cpumask_t mask;
-
mutex_lock(&callback_mutex);
- mask = cpuset_cpus_allowed_locked(tsk);
+ cpuset_cpus_allowed_locked(tsk, pmask);
mutex_unlock(&callback_mutex);
-
- return mask;
}
/**
* cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
* Must be called with callback_mutex held.
**/
-cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
{
- cpumask_t mask;
-
task_lock(tsk);
- guarantee_online_cpus(task_cs(tsk), &mask);
+ guarantee_online_cpus(task_cs(tsk), pmask);
task_unlock(tsk);
-
- return mask;
}
void cpuset_init_current_mems_allowed(void)
{
- current->mems_allowed = NODE_MASK_ALL;
+ nodes_setall(current->mems_allowed);
}
/**
@@ -2261,8 +2313,16 @@ void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
task->cpus_allowed);
seq_printf(m, "\n");
+ seq_printf(m, "Cpus_allowed_list:\t");
+ m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count,
+ task->cpus_allowed);
+ seq_printf(m, "\n");
seq_printf(m, "Mems_allowed:\t");
m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
task->mems_allowed);
seq_printf(m, "\n");
+ seq_printf(m, "Mems_allowed_list:\t");
+ m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count,
+ task->mems_allowed);
+ seq_printf(m, "\n");
}
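
Besides the new per-cpuset sched_relax_domain_level file, the hunk above adds Cpus_allowed_list and Mems_allowed_list lines to /proc/<pid>/status. A small userspace sketch that picks them out (illustration only; the field names follow the seq_printf() calls above):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "Cpus_allowed_list:", 18) ||
		    !strncmp(line, "Mems_allowed_list:", 18))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}
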
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fdb3fbe2b0c4..964964baefa2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -47,7 +47,7 @@ void dynamic_irq_init(unsigned int irq)
desc->irq_count = 0;
desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
- desc->affinity = CPU_MASK_ALL;
+ cpus_setall(desc->affinity);
#endif
spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 22be3ff3f363..e2764047ec03 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -165,7 +165,7 @@ static int ____call_usermodehelper(void *data)
}
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed(current, CPU_MASK_ALL);
+ set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
/*
* Our parent is keventd, which runs with elevated scheduling priority.
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 0ac887882f90..25241d6ec8cd 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,6 +180,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
wait_task_inactive(k);
set_task_cpu(k, cpu);
k->cpus_allowed = cpumask_of_cpu(cpu);
+ k->rt.nr_cpus_allowed = 1;
}
EXPORT_SYMBOL(kthread_bind);
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index b4e3c85abe74..7c74dab0d21b 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -64,8 +64,8 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
return;
for (i = 0; i < MAXLR; i++) {
- int q;
- int same = 1;
+ int q, same = 1;
+
/* Nothing stored: */
if (!latency_record[i].backtrace[0]) {
if (firstnonnull > i)
@@ -73,12 +73,15 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
continue;
}
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
- if (latency_record[i].backtrace[q] !=
- lat->backtrace[q])
+ unsigned long record = lat->backtrace[q];
+
+ if (latency_record[i].backtrace[q] != record) {
same = 0;
- if (same && lat->backtrace[q] == 0)
break;
- if (same && lat->backtrace[q] == ULONG_MAX)
+ }
+
+ /* 0 and ULONG_MAX entries mean end of backtrace: */
+ if (record == 0 || record == ULONG_MAX)
break;
}
if (same) {
@@ -143,14 +146,18 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
for (i = 0; i < LT_SAVECOUNT ; i++) {
struct latency_record *mylat;
int same = 1;
+
mylat = &tsk->latency_record[i];
for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
- if (mylat->backtrace[q] !=
- lat.backtrace[q])
+ unsigned long record = lat.backtrace[q];
+
+ if (mylat->backtrace[q] != record) {
same = 0;
- if (same && lat.backtrace[q] == 0)
break;
- if (same && lat.backtrace[q] == ULONG_MAX)
+ }
+
+ /* 0 and ULONG_MAX entries mean end of backtrace: */
+ if (record == 0 || record == ULONG_MAX)
break;
}
if (same) {
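
Both rewritten loops above now compare two backtraces entry by entry and stop at the first 0 or ULONG_MAX sentinel. A standalone sketch of that comparison logic, with an illustrative depth and made-up addresses (names are not the kernel's):

#include <limits.h>
#include <stdio.h>

#define DEPTH 12		/* stands in for LT_BACKTRACEDEPTH */

static int same_backtrace(const unsigned long *a, const unsigned long *b)
{
	int q;

	for (q = 0; q < DEPTH; q++) {
		unsigned long record = b[q];

		if (a[q] != record)
			return 0;

		/* 0 and ULONG_MAX entries mean end of backtrace */
		if (record == 0 || record == ULONG_MAX)
			break;
	}

	return 1;
}

int main(void)
{
	unsigned long x[DEPTH] = { 0xc0100000, 0xc0200000, 0 };
	unsigned long y[DEPTH] = { 0xc0100000, 0xc0200000, 0 };

	printf("%s\n", same_backtrace(x, y) ? "same" : "different");
	return 0;
}
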
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e9517014b57c..e1cdf196a515 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1007,10 +1007,10 @@ void __synchronize_sched(void)
if (sched_getaffinity(0, &oldmask) < 0)
oldmask = cpu_possible_map;
for_each_online_cpu(cpu) {
- sched_setaffinity(0, cpumask_of_cpu(cpu));
+ sched_setaffinity(0, &cpumask_of_cpu(cpu));
schedule();
}
- sched_setaffinity(0, oldmask);
+ sched_setaffinity(0, &oldmask);
}
EXPORT_SYMBOL_GPL(__synchronize_sched);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index fd599829e72a..47894f919d4e 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -723,9 +723,10 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask = CPU_MASK_ALL;
+ cpumask_t tmp_mask;
int i;
+ cpus_setall(tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -737,25 +738,27 @@ static void rcu_torture_shuffle_tasks(void)
if (rcu_idle_cpu != -1)
cpu_clear(rcu_idle_cpu, tmp_mask);
- set_cpus_allowed(current, tmp_mask);
+ set_cpus_allowed_ptr(current, &tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
- set_cpus_allowed(reader_tasks[i], tmp_mask);
+ set_cpus_allowed_ptr(reader_tasks[i],
+ &tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
- set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
+ set_cpus_allowed_ptr(fakewriter_tasks[i],
+ &tmp_mask);
}
if (writer_task)
- set_cpus_allowed(writer_task, tmp_mask);
+ set_cpus_allowed_ptr(writer_task, &tmp_mask);
if (stats_task)
- set_cpus_allowed(stats_task, tmp_mask);
+ set_cpus_allowed_ptr(stats_task, &tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
diff --git a/kernel/sched.c b/kernel/sched.c
index 8dcdec6fe0fe..57ba7ea9b744 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -66,6 +66,10 @@
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/bootmem.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
@@ -114,6 +118,11 @@ unsigned long long __attribute__((weak)) sched_clock(void)
*/
#define DEF_TIMESLICE (100 * HZ / 1000)
+/*
+ * single value that denotes runtime == period, ie unlimited time.
+ */
+#define RUNTIME_INF ((u64)~0ULL)
+
#ifdef CONFIG_SMP
/*
* Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
@@ -155,6 +164,84 @@ struct rt_prio_array {
struct list_head queue[MAX_RT_PRIO];
};
+struct rt_bandwidth {
+ /* nests inside the rq lock: */
+ spinlock_t rt_runtime_lock;
+ ktime_t rt_period;
+ u64 rt_runtime;
+ struct hrtimer rt_period_timer;
+};
+
+static struct rt_bandwidth def_rt_bandwidth;
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
+
+static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
+{
+ struct rt_bandwidth *rt_b =
+ container_of(timer, struct rt_bandwidth, rt_period_timer);
+ ktime_t now;
+ int overrun;
+ int idle = 0;
+
+ for (;;) {
+ now = hrtimer_cb_get_time(timer);
+ overrun = hrtimer_forward(timer, now, rt_b->rt_period);
+
+ if (!overrun)
+ break;
+
+ idle = do_sched_rt_period_timer(rt_b, overrun);
+ }
+
+ return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
+}
+
+static
+void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
+{
+ rt_b->rt_period = ns_to_ktime(period);
+ rt_b->rt_runtime = runtime;
+
+ spin_lock_init(&rt_b->rt_runtime_lock);
+
+ hrtimer_init(&rt_b->rt_period_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rt_b->rt_period_timer.function = sched_rt_period_timer;
+ rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+}
+
+static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+ ktime_t now;
+
+ if (rt_b->rt_runtime == RUNTIME_INF)
+ return;
+
+ if (hrtimer_active(&rt_b->rt_period_timer))
+ return;
+
+ spin_lock(&rt_b->rt_runtime_lock);
+ for (;;) {
+ if (hrtimer_active(&rt_b->rt_period_timer))
+ break;
+
+ now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
+ hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
+ hrtimer_start(&rt_b->rt_period_timer,
+ rt_b->rt_period_timer.expires,
+ HRTIMER_MODE_ABS);
+ }
+ spin_unlock(&rt_b->rt_runtime_lock);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
+{
+ hrtimer_cancel(&rt_b->rt_period_timer);
+}
+#endif
+
#ifdef CONFIG_GROUP_SCHED
#include <linux/cgroup.h>
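
struct rt_bandwidth above caps realtime tasks at rt_runtime nanoseconds out of every rt_period nanoseconds, with RUNTIME_INF meaning "no limit". A back-of-the-envelope sketch of the resulting share, assuming a 1 s period (the period default is an assumption here) together with the sysctl_sched_rt_runtime = 950000 us default visible further down:

#include <stdio.h>

int main(void)
{
	/* assumed defaults: 1 s period, 950 ms of it usable by RT tasks */
	unsigned long long rt_period_ns  = 1000000ULL * 1000;
	unsigned long long rt_runtime_ns =  950000ULL * 1000;

	printf("RT tasks may use %.0f%% of each period\n",
	       100.0 * rt_runtime_ns / rt_period_ns);
	return 0;
}
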
@@ -181,29 +268,39 @@ struct task_group {
struct sched_rt_entity **rt_se;
struct rt_rq **rt_rq;
- u64 rt_runtime;
+ struct rt_bandwidth rt_bandwidth;
#endif
struct rcu_head rcu;
struct list_head list;
+
+ struct task_group *parent;
+ struct list_head siblings;
+ struct list_head children;
};
+#ifdef CONFIG_USER_SCHED
+
+/*
+ * Root task group.
+ * Every UID task group (including init_task_group aka UID-0) will
+ * be a child to this group.
+ */
+struct task_group root_task_group;
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
-
-static struct sched_entity *init_sched_entity_p[NR_CPUS];
-static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
#endif
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
-
-static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS];
-static struct rt_rq *init_rt_rq_p[NR_CPUS];
+#endif
+#else
+#define root_task_group init_task_group
#endif
/* task_group_lock serializes add/remove of task groups and also changes to
@@ -221,23 +318,15 @@ static DEFINE_MUTEX(doms_cur_mutex);
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif
+#define MIN_SHARES 2
+
static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif
/* Default task group.
* Every task in system belong to this group at bootup.
*/
-struct task_group init_task_group = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
- .se = init_sched_entity_p,
- .cfs_rq = init_cfs_rq_p,
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
- .rt_se = init_sched_rt_entity_p,
- .rt_rq = init_rt_rq_p,
-#endif
-};
+struct task_group init_task_group;
/* return group to which a task belongs */
static inline struct task_group *task_group(struct task_struct *p)
@@ -297,8 +386,12 @@ struct cfs_rq {
struct rb_root tasks_timeline;
struct rb_node *rb_leftmost;
- struct rb_node *rb_load_balance_curr;
- /* 'curr' points to currently running entity on this cfs_rq.
+
+ struct list_head tasks;
+ struct list_head *balance_iterator;
+
+ /*
+ * 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running).
*/
struct sched_entity *curr, *next;
@@ -318,6 +411,43 @@ struct cfs_rq {
*/
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
+
+#ifdef CONFIG_SMP
+ unsigned long task_weight;
+ unsigned long shares;
+ /*
+ * We need space to build a sched_domain wide view of the full task
+ * group tree, in order to avoid depending on dynamic memory allocation
+ * during the load balancing we place this in the per cpu task group
+ * hierarchy. This limits the load balancing to one instance per cpu,
+ * but more should not be needed anyway.
+ */
+ struct aggregate_struct {
+ /*
+ * load = weight(cpus) * f(tg)
+ *
+ * Where f(tg) is the recursive weight fraction assigned to
+ * this group.
+ */
+ unsigned long load;
+
+ /*
+ * part of the group weight distributed to this span.
+ */
+ unsigned long shares;
+
+ /*
+ * The sum of all runqueue weights within this span.
+ */
+ unsigned long rq_weight;
+
+ /*
+ * Weight contributed by tasks; this is the part we can
+ * influence by moving tasks around.
+ */
+ unsigned long task_weight;
+ } aggregate;
+#endif
#endif
};
@@ -334,6 +464,9 @@ struct rt_rq {
#endif
int rt_throttled;
u64 rt_time;
+ u64 rt_runtime;
+ /* Nests inside the rq lock: */
+ spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
@@ -396,6 +529,7 @@ struct rq {
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
unsigned char idle_at_tick;
#ifdef CONFIG_NO_HZ
+ unsigned long last_tick_seen;
unsigned char in_nohz_recently;
#endif
/* capture load from *all* tasks on this cpu: */
@@ -405,8 +539,6 @@ struct rq {
struct cfs_rq cfs;
struct rt_rq rt;
- u64 rt_period_expire;
- int rt_throttled;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
@@ -499,6 +631,32 @@ static inline int cpu_of(struct rq *rq)
#endif
}
+#ifdef CONFIG_NO_HZ
+static inline bool nohz_on(int cpu)
+{
+ return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
+}
+
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+ return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+ rq->last_tick_seen = jiffies;
+}
+#else
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+ return 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+}
+#endif
+
/*
* Update the per-runqueue clock, as finegrained as the platform can give
* us, but without assuming monotonicity, etc.:
@@ -523,9 +681,12 @@ static void __update_rq_clock(struct rq *rq)
/*
* Catch too large forward jumps too:
*/
- if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
- if (clock < rq->tick_timestamp + TICK_NSEC)
- clock = rq->tick_timestamp + TICK_NSEC;
+ u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
+ u64 max_time = rq->tick_timestamp + max_jump;
+
+ if (unlikely(clock + delta > max_time)) {
+ if (clock < max_time)
+ clock = max_time;
else
clock++;
rq->clock_overflows++;
@@ -561,23 +722,6 @@ static void update_rq_clock(struct rq *rq)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-unsigned long rt_needs_cpu(int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- u64 delta;
-
- if (!rq->rt_throttled)
- return 0;
-
- if (rq->clock > rq->rt_period_expire)
- return 1;
-
- delta = rq->rt_period_expire - rq->clock;
- do_div(delta, NSEC_PER_SEC / HZ);
-
- return (unsigned long)delta;
-}
-
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
*/
@@ -590,22 +734,137 @@ unsigned long rt_needs_cpu(int cpu)
/*
* Debugging: various feature bits
*/
+
+#define SCHED_FEAT(name, enabled) \
+ __SCHED_FEAT_##name ,
+
enum {
- SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
- SCHED_FEAT_WAKEUP_PREEMPT = 2,
- SCHED_FEAT_START_DEBIT = 4,
- SCHED_FEAT_HRTICK = 8,
- SCHED_FEAT_DOUBLE_TICK = 16,
+#include "sched_features.h"
};
+#undef SCHED_FEAT
+
+#define SCHED_FEAT(name, enabled) \
+ (1UL << __SCHED_FEAT_##name) * enabled |
+
const_debug unsigned int sysctl_sched_features =
- SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
- SCHED_FEAT_WAKEUP_PREEMPT * 1 |
- SCHED_FEAT_START_DEBIT * 1 |
- SCHED_FEAT_HRTICK * 1 |
- SCHED_FEAT_DOUBLE_TICK * 0;
+#include "sched_features.h"
+ 0;
+
+#undef SCHED_FEAT
+
+#ifdef CONFIG_SCHED_DEBUG
+#define SCHED_FEAT(name, enabled) \
+ #name ,
+
+__read_mostly char *sched_feat_names[] = {
+#include "sched_features.h"
+ NULL
+};
+
+#undef SCHED_FEAT
+
+int sched_feat_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+sched_feat_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char *buf;
+ int r = 0;
+ int len = 0;
+ int i;
-#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
+ for (i = 0; sched_feat_names[i]; i++) {
+ len += strlen(sched_feat_names[i]);
+ len += 4;
+ }
+
+ buf = kmalloc(len + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; sched_feat_names[i]; i++) {
+ if (sysctl_sched_features & (1UL << i))
+ r += sprintf(buf + r, "%s ", sched_feat_names[i]);
+ else
+ r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]);
+ }
+
+ r += sprintf(buf + r, "\n");
+ WARN_ON(r >= len + 2);
+
+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+ kfree(buf);
+
+ return r;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ char *cmp = buf;
+ int neg = 0;
+ int i;
+
+ if (cnt > 63)
+ cnt = 63;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ if (strncmp(buf, "NO_", 3) == 0) {
+ neg = 1;
+ cmp += 3;
+ }
+
+ for (i = 0; sched_feat_names[i]; i++) {
+ int len = strlen(sched_feat_names[i]);
+
+ if (strncmp(cmp, sched_feat_names[i], len) == 0) {
+ if (neg)
+ sysctl_sched_features &= ~(1UL << i);
+ else
+ sysctl_sched_features |= (1UL << i);
+ break;
+ }
+ }
+
+ if (!sched_feat_names[i])
+ return -EINVAL;
+
+ filp->f_pos += cnt;
+
+ return cnt;
+}
+
+static struct file_operations sched_feat_fops = {
+ .open = sched_feat_open,
+ .read = sched_feat_read,
+ .write = sched_feat_write,
+};
+
+static __init int sched_init_debug(void)
+{
+ debugfs_create_file("sched_features", 0644, NULL, NULL,
+ &sched_feat_fops);
+
+ return 0;
+}
+late_initcall(sched_init_debug);
+
+#endif
+
+#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
/*
* Number of tasks to iterate in a single balance run.
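
The SCHED_FEAT() x-macro above is expanded twice: once into an enum of bit positions and once into the default sysctl_sched_features bitmask; the debugfs file then toggles bits by name, with a "NO_" prefix clearing them. A self-contained sketch of the expansion pattern, using hypothetical feature names in place of kernel/sched_features.h:

#include <stdio.h>

/* hypothetical feature list standing in for kernel/sched_features.h */
#define FEATURE_LIST \
	SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) \
	SCHED_FEAT(HRTICK, 1) \
	SCHED_FEAT(DOUBLE_TICK, 0)

/* first expansion: an enum of bit indices */
#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name,
enum { FEATURE_LIST };
#undef SCHED_FEAT

/* second expansion: the default bitmask built from the 'enabled' column */
#define SCHED_FEAT(name, enabled) (1UL << __SCHED_FEAT_##name) * enabled |
static const unsigned long default_features = FEATURE_LIST 0;
#undef SCHED_FEAT

int main(void)
{
	printf("default features: %#lx\n", default_features);
	return 0;
}
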
@@ -627,16 +886,52 @@ static __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
-/*
- * single value that denotes runtime == period, ie unlimited time.
- */
-#define RUNTIME_INF ((u64)~0ULL)
+static inline u64 global_rt_period(void)
+{
+ return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
+}
+
+static inline u64 global_rt_runtime(void)
+{
+ if (sysctl_sched_rt_period < 0)
+ return RUNTIME_INF;
+
+ return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
+}
+
+static const unsigned long long time_sync_thresh = 100000;
+
+static DEFINE_PER_CPU(unsigned long long, time_offset);
+static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
+ * Global lock which we take every now and then to synchronize
+ * the CPUs' time. This method is not warp-safe, but it's good
+ * enough to synchronize slowly diverging time sources and thus
+ * it's good enough for tracing:
*/
-unsigned long long cpu_clock(int cpu)
+static DEFINE_SPINLOCK(time_sync_lock);
+static unsigned long long prev_global_time;
+
+static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&time_sync_lock, flags);
+
+ if (time < prev_global_time) {
+ per_cpu(time_offset, cpu) += prev_global_time - time;
+ time = prev_global_time;
+ } else {
+ prev_global_time = time;
+ }
+
+ spin_unlock_irqrestore(&time_sync_lock, flags);
+
+ return time;
+}
+
+static unsigned long long __cpu_clock(int cpu)
{
unsigned long long now;
unsigned long flags;
@@ -657,6 +952,24 @@ unsigned long long cpu_clock(int cpu)
return now;
}
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+ unsigned long long prev_cpu_time, time, delta_time;
+
+ prev_cpu_time = per_cpu(prev_cpu_time, cpu);
+ time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
+ delta_time = time-prev_cpu_time;
+
+ if (unlikely(delta_time > time_sync_thresh))
+ time = __sync_cpu_clock(time, cpu);
+
+ return time;
+}
EXPORT_SYMBOL_GPL(cpu_clock);
#ifndef prepare_arch_switch
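
The idea behind __sync_cpu_clock() above: each CPU carries a private offset, and whenever its offset-corrected clock would fall behind the last globally observed value the offset is bumped so cpu_clock() never appears to run backwards across CPUs. A userspace sketch of just that clamping step (names and numbers are illustrative; the kernel only takes this path when the per-cpu delta exceeds time_sync_thresh):

#include <stdio.h>

static unsigned long long prev_global_time;

static unsigned long long sync_clock(unsigned long long raw,
				     unsigned long long *offset)
{
	unsigned long long time = raw + *offset;

	if (time < prev_global_time) {
		*offset += prev_global_time - time;
		time = prev_global_time;
	} else {
		prev_global_time = time;
	}

	return time;
}

int main(void)
{
	unsigned long long off0 = 0, off1 = 0;

	printf("%llu\n", sync_clock(100, &off0));	/* 100            */
	printf("%llu\n", sync_clock(90,  &off1));	/* clamped to 100 */
	printf("%llu\n", sync_clock(95,  &off1));	/* 95 + 10 = 105  */
	return 0;
}
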
@@ -1116,6 +1429,9 @@ static void __resched_task(struct task_struct *p, int tif_bit)
*/
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+/*
+ * delta *= weight / lw
+ */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
struct load_weight *lw)
@@ -1138,12 +1454,6 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
-static inline unsigned long
-calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
-{
- return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
-}
-
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
@@ -1241,11 +1551,390 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif
+static inline void inc_cpu_load(struct rq *rq, unsigned long load)
+{
+ update_load_add(&rq->load, load);
+}
+
+static inline void dec_cpu_load(struct rq *rq, unsigned long load)
+{
+ update_load_sub(&rq->load, load);
+}
+
#ifdef CONFIG_SMP
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long cpu_avg_load_per_task(int cpu);
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/*
+ * Group load balancing.
+ *
+ * We calculate a few balance domain wide aggregate numbers; load and weight.
+ * Given the pictures below, and assuming each item has equal weight:
+ *
+ * root 1 - thread
+ * / | \ A - group
+ * A 1 B
+ * /|\ / \
+ * C 2 D 3 4
+ * | |
+ * 5 6
+ *
+ * load:
+ * A and B get 1/3-rd of the total load. C and D get 1/3-rd of A's 1/3-rd,
+ * which equals 1/9-th of the total load.
+ *
+ * shares:
+ * The weight of this group on the selected cpus.
+ *
+ * rq_weight:
+ * Direct sum of all the cpus' rq weights, e.g. A would get 3 while
+ * B would get 2.
+ *
+ * task_weight:
+ * Part of the rq_weight contributed by tasks; all groups except B would
+ * get 1, B gets 2.
+ */
+
+static inline struct aggregate_struct *
+aggregate(struct task_group *tg, struct sched_domain *sd)
+{
+ return &tg->cfs_rq[sd->first_cpu]->aggregate;
+}
+
+typedef void (*aggregate_func)(struct task_group *, struct sched_domain *);
+
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ */
+static
+void aggregate_walk_tree(aggregate_func down, aggregate_func up,
+ struct sched_domain *sd)
+{
+ struct task_group *parent, *child;
+
+ rcu_read_lock();
+ parent = &root_task_group;
+down:
+ (*down)(parent, sd);
+ list_for_each_entry_rcu(child, &parent->children, siblings) {
+ parent = child;
+ goto down;
+
+up:
+ continue;
+ }
+ (*up)(parent, sd);
+
+ child = parent;
+ parent = parent->parent;
+ if (parent)
+ goto up;
+ rcu_read_unlock();
+}
+
+/*
+ * Calculate the aggregate runqueue weight.
+ */
+static
+void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
+{
+ unsigned long rq_weight = 0;
+ unsigned long task_weight = 0;
+ int i;
+
+ for_each_cpu_mask(i, sd->span) {
+ rq_weight += tg->cfs_rq[i]->load.weight;
+ task_weight += tg->cfs_rq[i]->task_weight;
+ }
+
+ aggregate(tg, sd)->rq_weight = rq_weight;
+ aggregate(tg, sd)->task_weight = task_weight;
+}
+
+/*
+ * Redistribute tg->shares amongst all tg->cfs_rq[]s.
+ */
+static void __aggregate_redistribute_shares(struct task_group *tg)
+{
+ int i, max_cpu = smp_processor_id();
+ unsigned long rq_weight = 0;
+ unsigned long shares, max_shares = 0, shares_rem = tg->shares;
+
+ for_each_possible_cpu(i)
+ rq_weight += tg->cfs_rq[i]->load.weight;
+
+ for_each_possible_cpu(i) {
+ /*
+ * divide shares proportional to the rq_weights.
+ */
+ shares = tg->shares * tg->cfs_rq[i]->load.weight;
+ shares /= rq_weight + 1;
+
+ tg->cfs_rq[i]->shares = shares;
+
+ if (shares > max_shares) {
+ max_shares = shares;
+ max_cpu = i;
+ }
+ shares_rem -= shares;
+ }
+
+ /*
+ * Ensure it all adds up to tg->shares; we can lose a few
+ * due to rounding down when computing the per-cpu shares.
+ */
+ if (shares_rem)
+ tg->cfs_rq[max_cpu]->shares += shares_rem;
+}
+
+/*
+ * Compute the weight of this group on the given cpus.
+ */
+static
+void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
+{
+ unsigned long shares = 0;
+ int i;
+
+again:
+ for_each_cpu_mask(i, sd->span)
+ shares += tg->cfs_rq[i]->shares;
+
+ /*
+ * When the span doesn't have any shares assigned, but does have
+ * tasks to run, do a machine wide rebalance (should be rare).
+ */
+ if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
+ __aggregate_redistribute_shares(tg);
+ goto again;
+ }
+
+ aggregate(tg, sd)->shares = shares;
+}
+
+/*
+ * Compute the load fraction assigned to this group, relies on the aggregate
+ * weight and this group's parent's load, i.e. top-down.
+ */
+static
+void aggregate_group_load(struct task_group *tg, struct sched_domain *sd)
+{
+ unsigned long load;
+
+ if (!tg->parent) {
+ int i;
+
+ load = 0;
+ for_each_cpu_mask(i, sd->span)
+ load += cpu_rq(i)->load.weight;
+
+ } else {
+ load = aggregate(tg->parent, sd)->load;
+
+ /*
+ * shares is our weight in the parent's rq so
+ * shares/parent->rq_weight gives our fraction of the load
+ */
+ load *= aggregate(tg, sd)->shares;
+ load /= aggregate(tg->parent, sd)->rq_weight + 1;
+ }
+
+ aggregate(tg, sd)->load = load;
+}
+
+static void __set_se_shares(struct sched_entity *se, unsigned long shares);
+
+/*
+ * Calculate and set the cpu's group shares.
+ */
+static void
+__update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
+ int tcpu)
+{
+ int boost = 0;
+ unsigned long shares;
+ unsigned long rq_weight;
+
+ if (!tg->se[tcpu])
+ return;
+
+ rq_weight = tg->cfs_rq[tcpu]->load.weight;
+
+ /*
+ * If there are currently no tasks on the cpu pretend there is one of
+ * average load so that when a new task gets to run here it will not
+ * get delayed by group starvation.
+ */
+ if (!rq_weight) {
+ boost = 1;
+ rq_weight = NICE_0_LOAD;
+ }
+
+ /*
+ * \Sum shares * rq_weight
+ * shares = -----------------------
+ * \Sum rq_weight
+ *
+ */
+ shares = aggregate(tg, sd)->shares * rq_weight;
+ shares /= aggregate(tg, sd)->rq_weight + 1;
+
+ /*
+ * record the actual number of shares, not the boosted amount.
+ */
+ tg->cfs_rq[tcpu]->shares = boost ? 0 : shares;
+
+ if (shares < MIN_SHARES)
+ shares = MIN_SHARES;
+
+ __set_se_shares(tg->se[tcpu], shares);
+}
+
+/*
+ * Re-adjust the weights on the cpu the task came from and on the cpu the
+ * task went to.
+ */
+static void
+__move_group_shares(struct task_group *tg, struct sched_domain *sd,
+ int scpu, int dcpu)
+{
+ unsigned long shares;
+
+ shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
+
+ __update_group_shares_cpu(tg, sd, scpu);
+ __update_group_shares_cpu(tg, sd, dcpu);
+
+ /*
+ * ensure we never lose shares due to rounding errors in the
+ * above redistribution.
+ */
+ shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares;
+ if (shares)
+ tg->cfs_rq[dcpu]->shares += shares;
+}
+
+/*
+ * Because changing a group's shares changes the weight of the super-group
+ * we need to walk up the tree and change all shares until we hit the root.
+ */
+static void
+move_group_shares(struct task_group *tg, struct sched_domain *sd,
+ int scpu, int dcpu)
+{
+ while (tg) {
+ __move_group_shares(tg, sd, scpu, dcpu);
+ tg = tg->parent;
+ }
+}
+
+static
+void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd)
+{
+ unsigned long shares = aggregate(tg, sd)->shares;
+ int i;
+
+ for_each_cpu_mask(i, sd->span) {
+ struct rq *rq = cpu_rq(i);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ __update_group_shares_cpu(tg, sd, i);
+ spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+ aggregate_group_shares(tg, sd);
+
+ /*
+ * ensure we never lose shares due to rounding errors in the
+ * above redistribution.
+ */
+ shares -= aggregate(tg, sd)->shares;
+ if (shares) {
+ tg->cfs_rq[sd->first_cpu]->shares += shares;
+ aggregate(tg, sd)->shares += shares;
+ }
+}
+
+/*
+ * Calculate the cumulative weight and recursive load of each task group
+ * while walking down the tree.
+ */
+static
+void aggregate_get_down(struct task_group *tg, struct sched_domain *sd)
+{
+ aggregate_group_weight(tg, sd);
+ aggregate_group_shares(tg, sd);
+ aggregate_group_load(tg, sd);
+}
+
+/*
+ * Rebalance the cpu shares while walking back up the tree.
+ */
+static
+void aggregate_get_up(struct task_group *tg, struct sched_domain *sd)
+{
+ aggregate_group_set_shares(tg, sd);
+}
+
+static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
+
+static void __init init_aggregate(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ spin_lock_init(&per_cpu(aggregate_lock, i));
+}
+
+static int get_aggregate(struct sched_domain *sd)
+{
+ if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu)))
+ return 0;
+
+ aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd);
+ return 1;
+}
+
+static void put_aggregate(struct sched_domain *sd)
+{
+ spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu));
+}
+
+static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
+{
+ cfs_rq->shares = shares;
+}
+
+#else
+
+static inline void init_aggregate(void)
+{
+}
+
+static inline int get_aggregate(struct sched_domain *sd)
+{
+ return 0;
+}
+
+static inline void put_aggregate(struct sched_domain *sd)
+{
+}
+#endif
+
+#else /* CONFIG_SMP */
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
+{
+}
+#endif
+
#endif /* CONFIG_SMP */
#include "sched_stats.h"
@@ -1258,26 +1947,14 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
#define sched_class_highest (&rt_sched_class)
-static inline void inc_load(struct rq *rq, const struct task_struct *p)
-{
- update_load_add(&rq->load, p->se.load.weight);
-}
-
-static inline void dec_load(struct rq *rq, const struct task_struct *p)
-{
- update_load_sub(&rq->load, p->se.load.weight);
-}
-
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
{
rq->nr_running++;
- inc_load(rq, p);
}
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
{
rq->nr_running--;
- dec_load(rq, p);
}
static void set_load_weight(struct task_struct *p)
@@ -1369,7 +2046,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
rq->nr_uninterruptible--;
enqueue_task(rq, p, wakeup);
- inc_nr_running(p, rq);
+ inc_nr_running(rq);
}
/*
@@ -1381,7 +2058,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
rq->nr_uninterruptible++;
dequeue_task(rq, p, sleep);
- dec_nr_running(p, rq);
+ dec_nr_running(rq);
}
/**
@@ -1438,7 +2115,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
/*
* Buddy candidates are cache hot:
*/
- if (&p->se == cfs_rq_of(&p->se)->next)
+ if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
return 1;
if (p->sched_class != &fair_sched_class)
@@ -1728,17 +2405,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
* find_idlest_cpu - find the idlest cpu among the cpus in group.
*/
static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
+ cpumask_t *tmp)
{
- cpumask_t tmp;
unsigned long load, min_load = ULONG_MAX;
int idlest = -1;
int i;
/* Traverse only the allowed CPUs */
- cpus_and(tmp, group->cpumask, p->cpus_allowed);
+ cpus_and(*tmp, group->cpumask, p->cpus_allowed);
- for_each_cpu_mask(i, tmp) {
+ for_each_cpu_mask(i, *tmp) {
load = weighted_cpuload(i);
if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1777,7 +2454,7 @@ static int sched_balance_self(int cpu, int flag)
}
while (sd) {
- cpumask_t span;
+ cpumask_t span, tmpmask;
struct sched_group *group;
int new_cpu, weight;
@@ -1793,7 +2470,7 @@ static int sched_balance_self(int cpu, int flag)
continue;
}
- new_cpu = find_idlest_cpu(group, t, cpu);
+ new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
if (new_cpu == -1 || new_cpu == cpu) {
/* Now try balancing at a lower domain level of cpu */
sd = sd->child;
@@ -1839,6 +2516,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
long old_state;
struct rq *rq;
+ if (!sched_feat(SYNC_WAKEUPS))
+ sync = 0;
+
smp_wmb();
rq = task_rq_lock(p, &flags);
old_state = p->state;
@@ -1955,6 +2635,7 @@ static void __sched_fork(struct task_struct *p)
INIT_LIST_HEAD(&p->rt.run_list);
p->se.on_rq = 0;
+ INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -2030,7 +2711,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
* management (if any):
*/
p->sched_class->task_new(rq, p);
- inc_nr_running(p, rq);
+ inc_nr_running(rq);
}
check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
@@ -2674,7 +3355,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long *imbalance, enum cpu_idle_type idle,
- int *sd_idle, cpumask_t *cpus, int *balance)
+ int *sd_idle, const cpumask_t *cpus, int *balance)
{
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2975,7 +3656,7 @@ ret:
*/
static struct rq *
find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
- unsigned long imbalance, cpumask_t *cpus)
+ unsigned long imbalance, const cpumask_t *cpus)
{
struct rq *busiest = NULL, *rq;
unsigned long max_load = 0;
@@ -3014,14 +3695,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *balance)
+ int *balance, cpumask_t *cpus)
{
int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
struct sched_group *group;
unsigned long imbalance;
struct rq *busiest;
- cpumask_t cpus = CPU_MASK_ALL;
unsigned long flags;
+ int unlock_aggregate;
+
+ cpus_setall(*cpus);
+
+ unlock_aggregate = get_aggregate(sd);
/*
* When power savings policy is enabled for the parent domain, idle
@@ -3037,7 +3722,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
- &cpus, balance);
+ cpus, balance);
if (*balance == 0)
goto out_balanced;
@@ -3047,7 +3732,7 @@ redo:
goto out_balanced;
}
- busiest = find_busiest_queue(group, idle, imbalance, &cpus);
+ busiest = find_busiest_queue(group, idle, imbalance, cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
@@ -3080,8 +3765,8 @@ redo:
/* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(all_pinned)) {
- cpu_clear(cpu_of(busiest), cpus);
- if (!cpus_empty(cpus))
+ cpu_clear(cpu_of(busiest), *cpus);
+ if (!cpus_empty(*cpus))
goto redo;
goto out_balanced;
}
@@ -3138,8 +3823,9 @@ redo:
if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- return -1;
- return ld_moved;
+ ld_moved = -1;
+
+ goto out;
out_balanced:
schedstat_inc(sd, lb_balanced[idle]);
@@ -3154,8 +3840,13 @@ out_one_pinned:
if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
- return -1;
- return 0;
+ ld_moved = -1;
+ else
+ ld_moved = 0;
+out:
+ if (unlock_aggregate)
+ put_aggregate(sd);
+ return ld_moved;
}
/*
@@ -3166,7 +3857,8 @@ out_one_pinned:
* this_rq is locked.
*/
static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
+ cpumask_t *cpus)
{
struct sched_group *group;
struct rq *busiest = NULL;
@@ -3174,7 +3866,8 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
int ld_moved = 0;
int sd_idle = 0;
int all_pinned = 0;
- cpumask_t cpus = CPU_MASK_ALL;
+
+ cpus_setall(*cpus);
/*
* When power savings policy is enabled for the parent domain, idle
@@ -3189,14 +3882,13 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
- &sd_idle, &cpus, NULL);
+ &sd_idle, cpus, NULL);
if (!group) {
schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
goto out_balanced;
}
- busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
- &cpus);
+ busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
goto out_balanced;
@@ -3218,8 +3910,8 @@ redo:
spin_unlock(&busiest->lock);
if (unlikely(all_pinned)) {
- cpu_clear(cpu_of(busiest), cpus);
- if (!cpus_empty(cpus))
+ cpu_clear(cpu_of(busiest), *cpus);
+ if (!cpus_empty(*cpus))
goto redo;
}
}
@@ -3253,6 +3945,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
struct sched_domain *sd;
int pulled_task = -1;
unsigned long next_balance = jiffies + HZ;
+ cpumask_t tmpmask;
for_each_domain(this_cpu, sd) {
unsigned long interval;
@@ -3262,8 +3955,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
if (sd->flags & SD_BALANCE_NEWIDLE)
/* If we've pulled tasks over stop searching: */
- pulled_task = load_balance_newidle(this_cpu,
- this_rq, sd);
+ pulled_task = load_balance_newidle(this_cpu, this_rq,
+ sd, &tmpmask);
interval = msecs_to_jiffies(sd->balance_interval);
if (time_after(next_balance, sd->last_balance + interval))
@@ -3422,6 +4115,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
+ cpumask_t tmp;
for_each_domain(cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3445,7 +4139,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance)) {
+ if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
/*
* We've pulled tasks over so either we're no
* longer idle, or one of our SMT siblings is
@@ -3561,7 +4255,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
*/
int ilb = first_cpu(nohz.cpu_mask);
- if (ilb != NR_CPUS)
+ if (ilb < nr_cpu_ids)
resched_cpu(ilb);
}
}
@@ -3765,9 +4459,9 @@ void scheduler_tick(void)
rq->clock_underflows++;
}
rq->tick_timestamp = rq->clock;
+ update_last_tick_seen(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
- update_sched_rt_period(rq);
spin_unlock(&rq->lock);
#ifdef CONFIG_SMP
@@ -4367,10 +5061,8 @@ void set_user_nice(struct task_struct *p, long nice)
goto out_unlock;
}
on_rq = p->se.on_rq;
- if (on_rq) {
+ if (on_rq)
dequeue_task(rq, p, 0);
- dec_load(rq, p);
- }
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p);
@@ -4380,7 +5072,6 @@ void set_user_nice(struct task_struct *p, long nice)
if (on_rq) {
enqueue_task(rq, p, 0);
- inc_load(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -4602,7 +5293,7 @@ recheck:
* Do not allow realtime tasks into groups that have no runtime
* assigned.
*/
- if (rt_policy(policy) && task_group(p)->rt_runtime == 0)
+ if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
return -EPERM;
#endif
@@ -4764,9 +5455,10 @@ out_unlock:
return retval;
}
-long sched_setaffinity(pid_t pid, cpumask_t new_mask)
+long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
{
cpumask_t cpus_allowed;
+ cpumask_t new_mask = *in_mask;
struct task_struct *p;
int retval;
@@ -4797,13 +5489,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
if (retval)
goto out_unlock;
- cpus_allowed = cpuset_cpus_allowed(p);
+ cpuset_cpus_allowed(p, &cpus_allowed);
cpus_and(new_mask, new_mask, cpus_allowed);
again:
- retval = set_cpus_allowed(p, new_mask);
+ retval = set_cpus_allowed_ptr(p, &new_mask);
if (!retval) {
- cpus_allowed = cpuset_cpus_allowed(p);
+ cpuset_cpus_allowed(p, &cpus_allowed);
if (!cpus_subset(new_mask, cpus_allowed)) {
/*
* We must have raced with a concurrent cpuset
@@ -4847,7 +5539,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
if (retval)
return retval;
- return sched_setaffinity(pid, new_mask);
+ return sched_setaffinity(pid, &new_mask);
}
/*
@@ -5309,7 +6001,6 @@ static inline void sched_init_granularity(void)
sysctl_sched_latency = limit;
sysctl_sched_wakeup_granularity *= factor;
- sysctl_sched_batch_wakeup_granularity *= factor;
}
#ifdef CONFIG_SMP
@@ -5338,7 +6029,7 @@ static inline void sched_init_granularity(void)
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
{
struct migration_req req;
unsigned long flags;
@@ -5346,23 +6037,23 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
int ret = 0;
rq = task_rq_lock(p, &flags);
- if (!cpus_intersects(new_mask, cpu_online_map)) {
+ if (!cpus_intersects(*new_mask, cpu_online_map)) {
ret = -EINVAL;
goto out;
}
if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, &new_mask);
+ p->sched_class->set_cpus_allowed(p, new_mask);
else {
- p->cpus_allowed = new_mask;
- p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+ p->cpus_allowed = *new_mask;
+ p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
}
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpu_isset(task_cpu(p), new_mask))
+ if (cpu_isset(task_cpu(p), *new_mask))
goto out;
- if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+ if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
/* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, &flags);
wake_up_process(rq->migration_thread);
@@ -5375,7 +6066,7 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(set_cpus_allowed);
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
@@ -5513,12 +6204,14 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
dest_cpu = any_online_cpu(mask);
/* On any allowed CPU? */
- if (dest_cpu == NR_CPUS)
+ if (dest_cpu >= nr_cpu_ids)
dest_cpu = any_online_cpu(p->cpus_allowed);
/* No more Mr. Nice Guy. */
- if (dest_cpu == NR_CPUS) {
- cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
+ if (dest_cpu >= nr_cpu_ids) {
+ cpumask_t cpus_allowed;
+
+ cpuset_cpus_allowed_locked(p, &cpus_allowed);
/*
* Try to stay on the same cpuset, where the
* current cpuset may be a subset of all cpus.
@@ -5554,7 +6247,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
*/
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
- struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
+ struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
unsigned long flags;
local_irq_save(flags);
@@ -5966,20 +6659,16 @@ void __init migration_init(void)
#ifdef CONFIG_SMP
-/* Number of possible processor ids */
-int nr_cpu_ids __read_mostly = NR_CPUS;
-EXPORT_SYMBOL(nr_cpu_ids);
-
#ifdef CONFIG_SCHED_DEBUG
-static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
+ cpumask_t *groupmask)
{
struct sched_group *group = sd->groups;
- cpumask_t groupmask;
- char str[NR_CPUS];
+ char str[256];
- cpumask_scnprintf(str, NR_CPUS, sd->span);
- cpus_clear(groupmask);
+ cpulist_scnprintf(str, sizeof(str), sd->span);
+ cpus_clear(*groupmask);
printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -6023,25 +6712,25 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
break;
}
- if (cpus_intersects(groupmask, group->cpumask)) {
+ if (cpus_intersects(*groupmask, group->cpumask)) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: repeated CPUs\n");
break;
}
- cpus_or(groupmask, groupmask, group->cpumask);
+ cpus_or(*groupmask, *groupmask, group->cpumask);
- cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+ cpulist_scnprintf(str, sizeof(str), group->cpumask);
printk(KERN_CONT " %s", str);
group = group->next;
} while (group != sd->groups);
printk(KERN_CONT "\n");
- if (!cpus_equal(sd->span, groupmask))
+ if (!cpus_equal(sd->span, *groupmask))
printk(KERN_ERR "ERROR: groups don't span domain->span\n");
- if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
+ if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
printk(KERN_ERR "ERROR: parent span is not a superset "
"of domain->span\n");
return 0;
@@ -6049,6 +6738,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
+ cpumask_t *groupmask;
int level = 0;
if (!sd) {
@@ -6058,14 +6748,21 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+ groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!groupmask) {
+ printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
+ return;
+ }
+
for (;;) {
- if (sched_domain_debug_one(sd, cpu, level))
+ if (sched_domain_debug_one(sd, cpu, level, groupmask))
break;
level++;
sd = sd->parent;
if (!sd)
break;
}
+ kfree(groupmask);
}
#else
# define sched_domain_debug(sd, cpu) do { } while (0)
@@ -6253,30 +6950,33 @@ __setup("isolcpus=", isolated_cpu_setup);
* and ->cpu_power to 0.
*/
static void
-init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
+init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
int (*group_fn)(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg))
+ struct sched_group **sg,
+ cpumask_t *tmpmask),
+ cpumask_t *covered, cpumask_t *tmpmask)
{
struct sched_group *first = NULL, *last = NULL;
- cpumask_t covered = CPU_MASK_NONE;
int i;
- for_each_cpu_mask(i, span) {
+ cpus_clear(*covered);
+
+ for_each_cpu_mask(i, *span) {
struct sched_group *sg;
- int group = group_fn(i, cpu_map, &sg);
+ int group = group_fn(i, cpu_map, &sg, tmpmask);
int j;
- if (cpu_isset(i, covered))
+ if (cpu_isset(i, *covered))
continue;
- sg->cpumask = CPU_MASK_NONE;
+ cpus_clear(sg->cpumask);
sg->__cpu_power = 0;
- for_each_cpu_mask(j, span) {
- if (group_fn(j, cpu_map, NULL) != group)
+ for_each_cpu_mask(j, *span) {
+ if (group_fn(j, cpu_map, NULL, tmpmask) != group)
continue;
- cpu_set(j, covered);
+ cpu_set(j, *covered);
cpu_set(j, sg->cpumask);
}
if (!first)
@@ -6302,7 +7002,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
*
* Should use nodemask_t.
*/
-static int find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, nodemask_t *used_nodes)
{
int i, n, val, min_val, best_node = 0;
@@ -6316,7 +7016,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
continue;
/* Skip already used nodes */
- if (test_bit(n, used_nodes))
+ if (node_isset(n, *used_nodes))
continue;
/* Simple min distance search */
@@ -6328,40 +7028,36 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
}
}
- set_bit(best_node, used_nodes);
+ node_set(best_node, *used_nodes);
return best_node;
}
/**
* sched_domain_node_span - get a cpumask for a node's sched_domain
* @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
*
* Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
-static cpumask_t sched_domain_node_span(int node)
+static void sched_domain_node_span(int node, cpumask_t *span)
{
- DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
- cpumask_t span, nodemask;
+ nodemask_t used_nodes;
+ node_to_cpumask_ptr(nodemask, node);
int i;
- cpus_clear(span);
- bitmap_zero(used_nodes, MAX_NUMNODES);
+ cpus_clear(*span);
+ nodes_clear(used_nodes);
- nodemask = node_to_cpumask(node);
- cpus_or(span, span, nodemask);
- set_bit(node, used_nodes);
+ cpus_or(*span, *span, *nodemask);
+ node_set(node, used_nodes);
for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
- int next_node = find_next_best_node(node, used_nodes);
+ int next_node = find_next_best_node(node, &used_nodes);
- nodemask = node_to_cpumask(next_node);
- cpus_or(span, span, nodemask);
+ node_to_cpumask_ptr_next(nodemask, next_node);
+ cpus_or(*span, *span, *nodemask);
}
-
- return span;
}
#endif
@@ -6375,7 +7071,8 @@ static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
static int
-cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *unused)
{
if (sg)
*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6393,19 +7090,22 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
static int
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *mask)
{
int group;
- cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
- cpus_and(mask, mask, *cpu_map);
- group = first_cpu(mask);
+
+ *mask = per_cpu(cpu_sibling_map, cpu);
+ cpus_and(*mask, *mask, *cpu_map);
+ group = first_cpu(*mask);
if (sg)
*sg = &per_cpu(sched_group_core, group);
return group;
}
#elif defined(CONFIG_SCHED_MC)
static int
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *unused)
{
if (sg)
*sg = &per_cpu(sched_group_core, cpu);
@@ -6417,17 +7117,18 @@ static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
static int
-cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *mask)
{
int group;
#ifdef CONFIG_SCHED_MC
- cpumask_t mask = cpu_coregroup_map(cpu);
- cpus_and(mask, mask, *cpu_map);
- group = first_cpu(mask);
+ *mask = cpu_coregroup_map(cpu);
+ cpus_and(*mask, *mask, *cpu_map);
+ group = first_cpu(*mask);
#elif defined(CONFIG_SCHED_SMT)
- cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
- cpus_and(mask, mask, *cpu_map);
- group = first_cpu(mask);
+ *mask = per_cpu(cpu_sibling_map, cpu);
+ cpus_and(*mask, *mask, *cpu_map);
+ group = first_cpu(*mask);
#else
group = cpu;
#endif
@@ -6443,19 +7144,19 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
* gets dynamically allocated.
*/
static DEFINE_PER_CPU(struct sched_domain, node_domains);
-static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
+static struct sched_group ***sched_group_nodes_bycpu;
static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+ struct sched_group **sg, cpumask_t *nodemask)
{
- cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
int group;
- cpus_and(nodemask, nodemask, *cpu_map);
- group = first_cpu(nodemask);
+ *nodemask = node_to_cpumask(cpu_to_node(cpu));
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ group = first_cpu(*nodemask);
if (sg)
*sg = &per_cpu(sched_group_allnodes, group);
@@ -6491,7 +7192,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
#ifdef CONFIG_NUMA
/* Free memory allocated for various sched_group structures */
-static void free_sched_groups(const cpumask_t *cpu_map)
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
{
int cpu, i;
@@ -6503,11 +7204,11 @@ static void free_sched_groups(const cpumask_t *cpu_map)
continue;
for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
struct sched_group *oldsg, *sg = sched_group_nodes[i];
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
+ *nodemask = node_to_cpumask(i);
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ if (cpus_empty(*nodemask))
continue;
if (sg == NULL)
@@ -6525,7 +7226,7 @@ next_sg:
}
}
#else
-static void free_sched_groups(const cpumask_t *cpu_map)
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
{
}
#endif
@@ -6583,13 +7284,106 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
}
/*
+ * Initializers for sched domains
+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
+ */
+
+#define SD_INIT(sd, type) sd_init_##type(sd)
+#define SD_INIT_FUNC(type) \
+static noinline void sd_init_##type(struct sched_domain *sd) \
+{ \
+ memset(sd, 0, sizeof(*sd)); \
+ *sd = SD_##type##_INIT; \
+ sd->level = SD_LV_##type; \
+}
+
+SD_INIT_FUNC(CPU)
+#ifdef CONFIG_NUMA
+ SD_INIT_FUNC(ALLNODES)
+ SD_INIT_FUNC(NODE)
+#endif
+#ifdef CONFIG_SCHED_SMT
+ SD_INIT_FUNC(SIBLING)
+#endif
+#ifdef CONFIG_SCHED_MC
+ SD_INIT_FUNC(MC)
+#endif
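
The SD_INIT()/SD_INIT_FUNC() pair replaces the open-coded "*sd = SD_CPU_INIT" assignments further down with one small noinline helper per domain type, so each large initializer is expanded once instead of inline in __build_sched_domains(). A minimal user-space sketch of the same token-pasting pattern; the TOY_* names and the explicit level argument are invented for illustration (the kernel uses SD_LV_##type and marks the helpers noinline):

#include <stdio.h>
#include <string.h>

struct toy_domain { int flags; int level; };

/* stand-ins for the real SD_CPU_INIT / SD_NODE_INIT compound literals */
#define TOY_CPU_INIT	(struct toy_domain){ .flags = 0x1 }
#define TOY_NODE_INIT	(struct toy_domain){ .flags = 0x2 }

#define TOY_INIT(sd, type)	toy_init_##type(sd)
#define TOY_INIT_FUNC(type, lv)				\
static void toy_init_##type(struct toy_domain *sd)	\
{							\
	memset(sd, 0, sizeof(*sd));			\
	*sd = TOY_##type##_INIT;			\
	sd->level = (lv);				\
}

TOY_INIT_FUNC(CPU, 1)
TOY_INIT_FUNC(NODE, 2)

int main(void)
{
	struct toy_domain cpu, node;

	TOY_INIT(&cpu, CPU);	/* expands to toy_init_CPU(&cpu) */
	TOY_INIT(&node, NODE);
	printf("cpu: flags=%#x level=%d, node: flags=%#x level=%d\n",
	       cpu.flags, cpu.level, node.flags, node.level);
	return 0;
}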
+
+/*
+ * To minimize stack usage, kmalloc room for cpumasks and share the
+ * space as the usage in build_sched_domains() dictates. Used only
+ * if the amount of space is significant.
+ */
+struct allmasks {
+ cpumask_t tmpmask; /* make this one first */
+ union {
+ cpumask_t nodemask;
+ cpumask_t this_sibling_map;
+ cpumask_t this_core_map;
+ };
+ cpumask_t send_covered;
+
+#ifdef CONFIG_NUMA
+ cpumask_t domainspan;
+ cpumask_t covered;
+ cpumask_t notcovered;
+#endif
+};
+
+#if NR_CPUS > 128
+#define SCHED_CPUMASK_ALLOC 1
+#define SCHED_CPUMASK_FREE(v) kfree(v)
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
+#else
+#define SCHED_CPUMASK_ALLOC 0
+#define SCHED_CPUMASK_FREE(v)
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
+#endif
+
+#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
+ ((unsigned long)(a) + offsetof(struct allmasks, v))
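
SCHED_CPUMASK_VAR() simply turns a member name of struct allmasks into a cpumask_t pointer by adding that member's offsetof() to the base of the (possibly kmalloc'ed) scratch area; because tmpmask is the first member, casting the allmasks pointer itself yields tmpmask. A small user-space sketch of the same offsetof() trick, with toy_mask_t and struct toy_masks standing in for cpumask_t and struct allmasks:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef unsigned long toy_mask_t;	/* stands in for cpumask_t */

struct toy_masks {
	toy_mask_t tmpmask;		/* first member: reachable by casting the base */
	union {				/* members that are never live at the same time */
		toy_mask_t nodemask;
		toy_mask_t this_sibling_map;
	};
	toy_mask_t send_covered;
};

/* same shape as SCHED_CPUMASK_VAR(v, a): member name -> pointer via offsetof() */
#define TOY_MASK_VAR(v, a) toy_mask_t *v = (toy_mask_t *)	\
		((unsigned long)(a) + offsetof(struct toy_masks, v))

int main(void)
{
	struct toy_masks *all = malloc(sizeof(*all));

	if (!all)
		return 1;

	toy_mask_t *tmpmask = (toy_mask_t *)all;	/* like tmpmask = (cpumask_t *)allmasks */
	TOY_MASK_VAR(send_covered, all);

	*tmpmask = 0x1;
	*send_covered = 0x2;
	printf("tmpmask=%#lx send_covered=%#lx\n", all->tmpmask, all->send_covered);
	free(all);
	return 0;
}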
+
+static int default_relax_domain_level = -1;
+
+static int __init setup_relax_domain_level(char *str)
+{
+ default_relax_domain_level = simple_strtoul(str, NULL, 0);
+ return 1;
+}
+__setup("relax_domain_level=", setup_relax_domain_level);
+
+static void set_domain_attribute(struct sched_domain *sd,
+ struct sched_domain_attr *attr)
+{
+ int request;
+
+ if (!attr || attr->relax_domain_level < 0) {
+ if (default_relax_domain_level < 0)
+ return;
+ else
+ request = default_relax_domain_level;
+ } else
+ request = attr->relax_domain_level;
+ if (request < sd->level) {
+ /* turn off idle balance on this domain */
+ sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
+ } else {
+ /* turn on idle balance on this domain */
+ sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
+ }
+}
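
set_domain_attribute() compares the requested relax_domain_level against the domain's own position in the hierarchy: domains at or below the request keep aggressive newidle/wake-idle balancing, wider domains have it switched off. A self-contained sketch of just that comparison; the LV_* and F_* constants are invented stand-ins for SD_LV_* and the SD_* flag bits:

#include <stdio.h>

/* invented stand-ins for SD_LV_SIBLING..SD_LV_ALLNODES and the SD_* flags */
enum { LV_SIBLING = 1, LV_MC, LV_CPU, LV_NODE, LV_ALLNODES };
#define F_WAKE_IDLE		0x01
#define F_WAKE_IDLE_FAR		0x02
#define F_BALANCE_NEWIDLE	0x04

static int relax(int flags, int level, int request)
{
	if (request < level)	/* this domain is wider than requested: no idle balancing */
		return flags & ~(F_WAKE_IDLE | F_BALANCE_NEWIDLE);
	/* within the requested level: balance aggressively when going idle */
	return flags | F_WAKE_IDLE_FAR | F_BALANCE_NEWIDLE;
}

int main(void)
{
	int request = LV_MC;	/* e.g. booting with relax_domain_level=2 */
	int lv;

	for (lv = LV_SIBLING; lv <= LV_ALLNODES; lv++)
		printf("level %d -> flags %#x\n", lv, relax(F_WAKE_IDLE, lv, request));
	return 0;
}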
+
+/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
*/
-static int build_sched_domains(const cpumask_t *cpu_map)
+static int __build_sched_domains(const cpumask_t *cpu_map,
+ struct sched_domain_attr *attr)
{
int i;
struct root_domain *rd;
+ SCHED_CPUMASK_DECLARE(allmasks);
+ cpumask_t *tmpmask;
#ifdef CONFIG_NUMA
struct sched_group **sched_group_nodes = NULL;
int sd_allnodes = 0;
@@ -6603,39 +7397,65 @@ static int build_sched_domains(const cpumask_t *cpu_map)
printk(KERN_WARNING "Can not alloc sched group node list\n");
return -ENOMEM;
}
- sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif
rd = alloc_rootdomain();
if (!rd) {
printk(KERN_WARNING "Cannot alloc root domain\n");
+#ifdef CONFIG_NUMA
+ kfree(sched_group_nodes);
+#endif
return -ENOMEM;
}
+#if SCHED_CPUMASK_ALLOC
+ /* get space for all scratch cpumask variables */
+ allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+ if (!allmasks) {
+ printk(KERN_WARNING "Cannot alloc cpumask array\n");
+ kfree(rd);
+#ifdef CONFIG_NUMA
+ kfree(sched_group_nodes);
+#endif
+ return -ENOMEM;
+ }
+#endif
+ tmpmask = (cpumask_t *)allmasks;
+
+#ifdef CONFIG_NUMA
+ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+#endif
+
/*
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu_mask(i, *cpu_map) {
struct sched_domain *sd = NULL, *p;
- cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
- cpus_and(nodemask, nodemask, *cpu_map);
+ *nodemask = node_to_cpumask(cpu_to_node(i));
+ cpus_and(*nodemask, *nodemask, *cpu_map);
#ifdef CONFIG_NUMA
if (cpus_weight(*cpu_map) >
- SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+ SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
sd = &per_cpu(allnodes_domains, i);
- *sd = SD_ALLNODES_INIT;
+ SD_INIT(sd, ALLNODES);
+ set_domain_attribute(sd, attr);
sd->span = *cpu_map;
- cpu_to_allnodes_group(i, cpu_map, &sd->groups);
+ sd->first_cpu = first_cpu(sd->span);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
p = sd;
sd_allnodes = 1;
} else
p = NULL;
sd = &per_cpu(node_domains, i);
- *sd = SD_NODE_INIT;
- sd->span = sched_domain_node_span(cpu_to_node(i));
+ SD_INIT(sd, NODE);
+ set_domain_attribute(sd, attr);
+ sched_domain_node_span(cpu_to_node(i), &sd->span);
+ sd->first_cpu = first_cpu(sd->span);
sd->parent = p;
if (p)
p->child = sd;
@@ -6644,94 +7464,120 @@ static int build_sched_domains(const cpumask_t *cpu_map)
p = sd;
sd = &per_cpu(phys_domains, i);
- *sd = SD_CPU_INIT;
- sd->span = nodemask;
+ SD_INIT(sd, CPU);
+ set_domain_attribute(sd, attr);
+ sd->span = *nodemask;
+ sd->first_cpu = first_cpu(sd->span);
sd->parent = p;
if (p)
p->child = sd;
- cpu_to_phys_group(i, cpu_map, &sd->groups);
+ cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
#ifdef CONFIG_SCHED_MC
p = sd;
sd = &per_cpu(core_domains, i);
- *sd = SD_MC_INIT;
+ SD_INIT(sd, MC);
+ set_domain_attribute(sd, attr);
sd->span = cpu_coregroup_map(i);
+ sd->first_cpu = first_cpu(sd->span);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
- cpu_to_core_group(i, cpu_map, &sd->groups);
+ cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
#endif
#ifdef CONFIG_SCHED_SMT
p = sd;
sd = &per_cpu(cpu_domains, i);
- *sd = SD_SIBLING_INIT;
+ SD_INIT(sd, SIBLING);
+ set_domain_attribute(sd, attr);
sd->span = per_cpu(cpu_sibling_map, i);
+ sd->first_cpu = first_cpu(sd->span);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
- cpu_to_cpu_group(i, cpu_map, &sd->groups);
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
#endif
}
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
for_each_cpu_mask(i, *cpu_map) {
- cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
- cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
- if (i != first_cpu(this_sibling_map))
+ SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+ *this_sibling_map = per_cpu(cpu_sibling_map, i);
+ cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
+ if (i != first_cpu(*this_sibling_map))
continue;
init_sched_build_groups(this_sibling_map, cpu_map,
- &cpu_to_cpu_group);
+ &cpu_to_cpu_group,
+ send_covered, tmpmask);
}
#endif
#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
for_each_cpu_mask(i, *cpu_map) {
- cpumask_t this_core_map = cpu_coregroup_map(i);
- cpus_and(this_core_map, this_core_map, *cpu_map);
- if (i != first_cpu(this_core_map))
+ SCHED_CPUMASK_VAR(this_core_map, allmasks);
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+ *this_core_map = cpu_coregroup_map(i);
+ cpus_and(*this_core_map, *this_core_map, *cpu_map);
+ if (i != first_cpu(*this_core_map))
continue;
+
init_sched_build_groups(this_core_map, cpu_map,
- &cpu_to_core_group);
+ &cpu_to_core_group,
+ send_covered, tmpmask);
}
#endif
/* Set up physical groups */
for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
+ *nodemask = node_to_cpumask(i);
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ if (cpus_empty(*nodemask))
continue;
- init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
+ init_sched_build_groups(nodemask, cpu_map,
+ &cpu_to_phys_group,
+ send_covered, tmpmask);
}
#ifdef CONFIG_NUMA
/* Set up node groups */
- if (sd_allnodes)
- init_sched_build_groups(*cpu_map, cpu_map,
- &cpu_to_allnodes_group);
+ if (sd_allnodes) {
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+ init_sched_build_groups(cpu_map, cpu_map,
+ &cpu_to_allnodes_group,
+ send_covered, tmpmask);
+ }
for (i = 0; i < MAX_NUMNODES; i++) {
/* Set up node groups */
struct sched_group *sg, *prev;
- cpumask_t nodemask = node_to_cpumask(i);
- cpumask_t domainspan;
- cpumask_t covered = CPU_MASK_NONE;
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
+ SCHED_CPUMASK_VAR(domainspan, allmasks);
+ SCHED_CPUMASK_VAR(covered, allmasks);
int j;
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask)) {
+ *nodemask = node_to_cpumask(i);
+ cpus_clear(*covered);
+
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ if (cpus_empty(*nodemask)) {
sched_group_nodes[i] = NULL;
continue;
}
- domainspan = sched_domain_node_span(i);
- cpus_and(domainspan, domainspan, *cpu_map);
+ sched_domain_node_span(i, domainspan);
+ cpus_and(*domainspan, *domainspan, *cpu_map);
sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
if (!sg) {
@@ -6740,31 +7586,31 @@ static int build_sched_domains(const cpumask_t *cpu_map)
goto error;
}
sched_group_nodes[i] = sg;
- for_each_cpu_mask(j, nodemask) {
+ for_each_cpu_mask(j, *nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j);
sd->groups = sg;
}
sg->__cpu_power = 0;
- sg->cpumask = nodemask;
+ sg->cpumask = *nodemask;
sg->next = sg;
- cpus_or(covered, covered, nodemask);
+ cpus_or(*covered, *covered, *nodemask);
prev = sg;
for (j = 0; j < MAX_NUMNODES; j++) {
- cpumask_t tmp, notcovered;
+ SCHED_CPUMASK_VAR(notcovered, allmasks);
int n = (i + j) % MAX_NUMNODES;
+ node_to_cpumask_ptr(pnodemask, n);
- cpus_complement(notcovered, covered);
- cpus_and(tmp, notcovered, *cpu_map);
- cpus_and(tmp, tmp, domainspan);
- if (cpus_empty(tmp))
+ cpus_complement(*notcovered, *covered);
+ cpus_and(*tmpmask, *notcovered, *cpu_map);
+ cpus_and(*tmpmask, *tmpmask, *domainspan);
+ if (cpus_empty(*tmpmask))
break;
- nodemask = node_to_cpumask(n);
- cpus_and(tmp, tmp, nodemask);
- if (cpus_empty(tmp))
+ cpus_and(*tmpmask, *tmpmask, *pnodemask);
+ if (cpus_empty(*tmpmask))
continue;
sg = kmalloc_node(sizeof(struct sched_group),
@@ -6775,9 +7621,9 @@ static int build_sched_domains(const cpumask_t *cpu_map)
goto error;
}
sg->__cpu_power = 0;
- sg->cpumask = tmp;
+ sg->cpumask = *tmpmask;
sg->next = prev->next;
- cpus_or(covered, covered, tmp);
+ cpus_or(*covered, *covered, *tmpmask);
prev->next = sg;
prev = sg;
}
@@ -6813,7 +7659,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
if (sd_allnodes) {
struct sched_group *sg;
- cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
+ cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
+ tmpmask);
init_numa_sched_groups_power(sg);
}
#endif
@@ -6831,17 +7678,26 @@ static int build_sched_domains(const cpumask_t *cpu_map)
cpu_attach_domain(sd, rd, i);
}
+ SCHED_CPUMASK_FREE((void *)allmasks);
return 0;
#ifdef CONFIG_NUMA
error:
- free_sched_groups(cpu_map);
+ free_sched_groups(cpu_map, tmpmask);
+ SCHED_CPUMASK_FREE((void *)allmasks);
return -ENOMEM;
#endif
}
+static int build_sched_domains(const cpumask_t *cpu_map)
+{
+ return __build_sched_domains(cpu_map, NULL);
+}
+
static cpumask_t *doms_cur; /* current sched domains */
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
+static struct sched_domain_attr *dattr_cur; /* attributes of custom domains
+ in 'doms_cur' */
/*
* Special case: If a kmalloc of a doms_cur partition (array of
@@ -6869,15 +7725,17 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
if (!doms_cur)
doms_cur = &fallback_doms;
cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+ dattr_cur = NULL;
err = build_sched_domains(doms_cur);
register_sched_domain_sysctl();
return err;
}
-static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
+static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
+ cpumask_t *tmpmask)
{
- free_sched_groups(cpu_map);
+ free_sched_groups(cpu_map, tmpmask);
}
/*
@@ -6886,6 +7744,7 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
*/
static void detach_destroy_domains(const cpumask_t *cpu_map)
{
+ cpumask_t tmpmask;
int i;
unregister_sched_domain_sysctl();
@@ -6893,7 +7752,23 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
for_each_cpu_mask(i, *cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
synchronize_sched();
- arch_destroy_sched_domains(cpu_map);
+ arch_destroy_sched_domains(cpu_map, &tmpmask);
+}
+
+/* handle null as "default" */
+static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
+ struct sched_domain_attr *new, int idx_new)
+{
+ struct sched_domain_attr tmp;
+
+ /* fast path */
+ if (!new && !cur)
+ return 1;
+
+ tmp = SD_ATTR_INIT;
+ return !memcmp(cur ? (cur + idx_cur) : &tmp,
+ new ? (new + idx_new) : &tmp,
+ sizeof(struct sched_domain_attr));
}
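
dattrs_equal() lets a NULL attribute array stand for "all defaults", so a partition built before attributes existed still compares equal to one whose attribute happens to be SD_ATTR_INIT. A user-space sketch of the same NULL-means-default comparison; struct toy_attr and its single field are invented for illustration:

#include <stdio.h>
#include <string.h>

struct toy_attr { int relax_level; };
#define TOY_ATTR_INIT (struct toy_attr){ .relax_level = -1 }

static int toy_attrs_equal(struct toy_attr *cur, int i,
			   struct toy_attr *new, int j)
{
	struct toy_attr tmp = TOY_ATTR_INIT;

	if (!cur && !new)
		return 1;
	return !memcmp(cur ? cur + i : &tmp,
		       new ? new + j : &tmp, sizeof(tmp));
}

int main(void)
{
	struct toy_attr dflt[1] = { { .relax_level = -1 } };
	struct toy_attr cust[1] = { { .relax_level =  2 } };

	printf("%d\n", toy_attrs_equal(NULL, 0, dflt, 0)); /* 1: NULL equals the default */
	printf("%d\n", toy_attrs_equal(NULL, 0, cust, 0)); /* 0: differs from the default */
	return 0;
}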
/*
@@ -6917,7 +7792,8 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
*
* Call with hotplug lock held
*/
-void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
+void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+ struct sched_domain_attr *dattr_new)
{
int i, j;
@@ -6930,12 +7806,14 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
ndoms_new = 1;
doms_new = &fallback_doms;
cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+ dattr_new = NULL;
}
/* Destroy deleted domains */
for (i = 0; i < ndoms_cur; i++) {
for (j = 0; j < ndoms_new; j++) {
- if (cpus_equal(doms_cur[i], doms_new[j]))
+ if (cpus_equal(doms_cur[i], doms_new[j])
+ && dattrs_equal(dattr_cur, i, dattr_new, j))
goto match1;
}
/* no match - a current sched domain not in new doms_new[] */
@@ -6947,11 +7825,13 @@ match1:
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < ndoms_cur; j++) {
- if (cpus_equal(doms_new[i], doms_cur[j]))
+ if (cpus_equal(doms_new[i], doms_cur[j])
+ && dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
}
/* no match - add a new doms_new */
- build_sched_domains(doms_new + i);
+ __build_sched_domains(doms_new + i,
+ dattr_new ? dattr_new + i : NULL);
match2:
;
}
@@ -6959,7 +7839,9 @@ match2:
/* Remember the new sched domains */
if (doms_cur != &fallback_doms)
kfree(doms_cur);
+ kfree(dattr_cur); /* kfree(NULL) is safe */
doms_cur = doms_new;
+ dattr_cur = dattr_new;
ndoms_cur = ndoms_new;
register_sched_domain_sysctl();
@@ -7086,6 +7968,11 @@ void __init sched_init_smp(void)
{
cpumask_t non_isolated_cpus;
+#if defined(CONFIG_NUMA)
+ sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
+ GFP_KERNEL);
+ BUG_ON(sched_group_nodes_bycpu == NULL);
+#endif
get_online_cpus();
arch_init_sched_domains(&cpu_online_map);
cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
@@ -7096,13 +7983,18 @@ void __init sched_init_smp(void)
hotcpu_notifier(update_sched_domains, 0);
/* Move init over to a non-isolated CPU */
- if (set_cpus_allowed(current, non_isolated_cpus) < 0)
+ if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
BUG();
sched_init_granularity();
}
#else
void __init sched_init_smp(void)
{
+#if defined(CONFIG_NUMA)
+ sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
+ GFP_KERNEL);
+ BUG_ON(sched_group_nodes_bycpu == NULL);
+#endif
sched_init_granularity();
}
#endif /* CONFIG_SMP */
@@ -7117,6 +8009,7 @@ int in_sched_functions(unsigned long addr)
static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{
cfs_rq->tasks_timeline = RB_ROOT;
+ INIT_LIST_HEAD(&cfs_rq->tasks);
#ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq->rq = rq;
#endif
@@ -7146,6 +8039,8 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
+ rt_rq->rt_runtime = 0;
+ spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
@@ -7154,10 +8049,11 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
- struct cfs_rq *cfs_rq, struct sched_entity *se,
- int cpu, int add)
+static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+ struct sched_entity *se, int cpu, int add,
+ struct sched_entity *parent)
{
+ struct rq *rq = cpu_rq(cpu);
tg->cfs_rq[cpu] = cfs_rq;
init_cfs_rq(cfs_rq, rq);
cfs_rq->tg = tg;
@@ -7165,45 +8061,132 @@ static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
tg->se[cpu] = se;
- se->cfs_rq = &rq->cfs;
+ /* se could be NULL for init_task_group */
+ if (!se)
+ return;
+
+ if (!parent)
+ se->cfs_rq = &rq->cfs;
+ else
+ se->cfs_rq = parent->my_q;
+
se->my_q = cfs_rq;
se->load.weight = tg->shares;
se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
- se->parent = NULL;
+ se->parent = parent;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
-static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
- struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
- int cpu, int add)
+static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
+ struct sched_rt_entity *rt_se, int cpu, int add,
+ struct sched_rt_entity *parent)
{
+ struct rq *rq = cpu_rq(cpu);
+
tg->rt_rq[cpu] = rt_rq;
init_rt_rq(rt_rq, rq);
rt_rq->tg = tg;
rt_rq->rt_se = rt_se;
+ rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
if (add)
list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
tg->rt_se[cpu] = rt_se;
+ if (!rt_se)
+ return;
+
+ if (!parent)
+ rt_se->rt_rq = &rq->rt;
+ else
+ rt_se->rt_rq = parent->my_q;
+
- rt_se->rt_rq = &rq->rt;
rt_se->my_q = rt_rq;
- rt_se->parent = NULL;
+ rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
#endif
void __init sched_init(void)
{
- int highest_cpu = 0;
int i, j;
+ unsigned long alloc_size = 0, ptr;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
+#endif
+#ifdef CONFIG_USER_SCHED
+ alloc_size *= 2;
+#endif
+ /*
+ * As sched_init() is called before page_alloc is set up,
+ * we use alloc_bootmem_low().
+ */
+ if (alloc_size) {
+ ptr = (unsigned long)alloc_bootmem_low(alloc_size);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ init_task_group.se = (struct sched_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+ root_task_group.se = (struct sched_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ root_task_group.cfs_rq = (struct cfs_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+#endif
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+ init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ init_task_group.rt_rq = (struct rt_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+#ifdef CONFIG_USER_SCHED
+ root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+
+ root_task_group.rt_rq = (struct rt_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
+#endif
+#endif
+ }
#ifdef CONFIG_SMP
+ init_aggregate();
init_defrootdomain();
#endif
+ init_rt_bandwidth(&def_rt_bandwidth,
+ global_rt_period(), global_rt_runtime());
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ init_rt_bandwidth(&init_task_group.rt_bandwidth,
+ global_rt_period(), global_rt_runtime());
+#ifdef CONFIG_USER_SCHED
+ init_rt_bandwidth(&root_task_group.rt_bandwidth,
+ global_rt_period(), RUNTIME_INF);
+#endif
+#endif
+
#ifdef CONFIG_GROUP_SCHED
list_add(&init_task_group.list, &task_groups);
+ INIT_LIST_HEAD(&init_task_group.children);
+
+#ifdef CONFIG_USER_SCHED
+ INIT_LIST_HEAD(&root_task_group.children);
+ init_task_group.parent = &root_task_group;
+ list_add(&init_task_group.siblings, &root_task_group.children);
+#endif
#endif
for_each_possible_cpu(i) {
@@ -7214,26 +8197,68 @@ void __init sched_init(void)
lockdep_set_class(&rq->lock, &rq->rq_lock_key);
rq->nr_running = 0;
rq->clock = 1;
+ update_last_tick_seen(rq);
init_cfs_rq(&rq->cfs, rq);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
init_task_group.shares = init_task_group_load;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
- init_tg_cfs_entry(rq, &init_task_group,
+#ifdef CONFIG_CGROUP_SCHED
+ /*
+ * How much cpu bandwidth does init_task_group get?
+ *
+ * In case of task-groups formed through the cgroup filesystem, it
+ * gets 100% of the cpu resources in the system. This overall
+ * system cpu resource is divided among the tasks of
+ * init_task_group and its child task-groups in a fair manner,
+ * based on each entity's (task or task-group's) weight
+ * (se->load.weight).
+ *
+ * In other words, if init_task_group has 10 tasks (each of weight
+ * 1024) and two child groups A0 and A1 (each of weight 1024),
+ * then A0's share of the cpu resource is:
+ *
+ * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+ *
+ * We achieve this by letting init_task_group's tasks sit
+ * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+ */
+ init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
+#elif defined CONFIG_USER_SCHED
+ root_task_group.shares = NICE_0_LOAD;
+ init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
+ /*
+ * In case of task-groups formed through the user id of tasks,
+ * init_task_group represents tasks belonging to root user.
+ * Hence it forms a sibling of all subsequent groups formed.
+ * In this case, init_task_group gets only a fraction of overall
+ * system cpu resource, based on the weight assigned to root
+ * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
+ * by letting tasks of init_task_group sit in a separate cfs_rq
+ * (init_cfs_rq) and having one entity represent this group of
+ * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
+ */
+ init_tg_cfs_entry(&init_task_group,
&per_cpu(init_cfs_rq, i),
- &per_cpu(init_sched_entity, i), i, 1);
+ &per_cpu(init_sched_entity, i), i, 1,
+ root_task_group.se[i]);
#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+ rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
- init_task_group.rt_runtime =
- sysctl_sched_rt_runtime * NSEC_PER_USEC;
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
- init_tg_rt_entry(rq, &init_task_group,
+#ifdef CONFIG_CGROUP_SCHED
+ init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
+#elif defined CONFIG_USER_SCHED
+ init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
+ init_tg_rt_entry(&init_task_group,
&per_cpu(init_rt_rq, i),
- &per_cpu(init_sched_rt_entity, i), i, 1);
+ &per_cpu(init_sched_rt_entity, i), i, 1,
+ root_task_group.rt_se[i]);
+#endif
#endif
- rq->rt_period_expire = 0;
- rq->rt_throttled = 0;
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
rq->cpu_load[j] = 0;
@@ -7250,7 +8275,6 @@ void __init sched_init(void)
#endif
init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0);
- highest_cpu = i;
}
set_load_weight(&init_task);
@@ -7260,7 +8284,6 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_SMP
- nr_cpu_ids = highest_cpu + 1;
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
#endif
@@ -7419,8 +8442,6 @@ void set_curr_task(int cpu, struct task_struct *p)
#endif
-#ifdef CONFIG_GROUP_SCHED
-
#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
@@ -7437,17 +8458,18 @@ static void free_fair_sched_group(struct task_group *tg)
kfree(tg->se);
}
-static int alloc_fair_sched_group(struct task_group *tg)
+static
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se;
+ struct sched_entity *se, *parent_se;
struct rq *rq;
int i;
- tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
+ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
- tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
+ tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->se)
goto err;
@@ -7466,7 +8488,8 @@ static int alloc_fair_sched_group(struct task_group *tg)
if (!se)
goto err;
- init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
+ parent_se = parent ? parent->se[i] : NULL;
+ init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
}
return 1;
@@ -7490,7 +8513,8 @@ static inline void free_fair_sched_group(struct task_group *tg)
{
}
-static inline int alloc_fair_sched_group(struct task_group *tg)
+static inline
+int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
@@ -7509,6 +8533,8 @@ static void free_rt_sched_group(struct task_group *tg)
{
int i;
+ destroy_rt_bandwidth(&tg->rt_bandwidth);
+
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
@@ -7520,21 +8546,23 @@ static void free_rt_sched_group(struct task_group *tg)
kfree(tg->rt_se);
}
-static int alloc_rt_sched_group(struct task_group *tg)
+static
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
- struct sched_rt_entity *rt_se;
+ struct sched_rt_entity *rt_se, *parent_se;
struct rq *rq;
int i;
- tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
+ tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_rq)
goto err;
- tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
+ tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_se)
goto err;
- tg->rt_runtime = 0;
+ init_rt_bandwidth(&tg->rt_bandwidth,
+ ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rq = cpu_rq(i);
@@ -7549,7 +8577,8 @@ static int alloc_rt_sched_group(struct task_group *tg)
if (!rt_se)
goto err;
- init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
+ parent_se = parent ? parent->rt_se[i] : NULL;
+ init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
}
return 1;
@@ -7573,7 +8602,8 @@ static inline void free_rt_sched_group(struct task_group *tg)
{
}
-static inline int alloc_rt_sched_group(struct task_group *tg)
+static inline
+int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
@@ -7587,6 +8617,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
}
#endif
+#ifdef CONFIG_GROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
@@ -7595,7 +8626,7 @@ static void free_sched_group(struct task_group *tg)
}
/* allocate runqueue etc for a new task group */
-struct task_group *sched_create_group(void)
+struct task_group *sched_create_group(struct task_group *parent)
{
struct task_group *tg;
unsigned long flags;
@@ -7605,10 +8636,10 @@ struct task_group *sched_create_group(void)
if (!tg)
return ERR_PTR(-ENOMEM);
- if (!alloc_fair_sched_group(tg))
+ if (!alloc_fair_sched_group(tg, parent))
goto err;
- if (!alloc_rt_sched_group(tg))
+ if (!alloc_rt_sched_group(tg, parent))
goto err;
spin_lock_irqsave(&task_group_lock, flags);
@@ -7617,6 +8648,12 @@ struct task_group *sched_create_group(void)
register_rt_sched_group(tg, i);
}
list_add_rcu(&tg->list, &task_groups);
+
+ WARN_ON(!parent); /* root should already exist */
+
+ tg->parent = parent;
+ list_add_rcu(&tg->siblings, &parent->children);
+ INIT_LIST_HEAD(&tg->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
@@ -7645,6 +8682,7 @@ void sched_destroy_group(struct task_group *tg)
unregister_rt_sched_group(tg, i);
}
list_del_rcu(&tg->list);
+ list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for possible concurrent references to cfs_rqs complete */
@@ -7688,16 +8726,14 @@ void sched_move_task(struct task_struct *tsk)
task_rq_unlock(rq, &flags);
}
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
+static void __set_se_shares(struct sched_entity *se, unsigned long shares)
{
struct cfs_rq *cfs_rq = se->cfs_rq;
- struct rq *rq = cfs_rq->rq;
int on_rq;
- spin_lock_irq(&rq->lock);
-
on_rq = se->on_rq;
if (on_rq)
dequeue_entity(cfs_rq, se, 0);
@@ -7707,8 +8743,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
if (on_rq)
enqueue_entity(cfs_rq, se, 0);
+}
- spin_unlock_irq(&rq->lock);
+static void set_se_shares(struct sched_entity *se, unsigned long shares)
+{
+ struct cfs_rq *cfs_rq = se->cfs_rq;
+ struct rq *rq = cfs_rq->rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ __set_se_shares(se, shares);
+ spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
@@ -7719,12 +8764,18 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
unsigned long flags;
/*
+ * We can't change the weight of the root cgroup.
+ */
+ if (!tg->se[0])
+ return -EINVAL;
+
+ /*
 * A weight of 0 or 1 can cause arithmetic problems.
* (The default weight is 1024 - so there's no practical
* limitation from this.)
*/
- if (shares < 2)
- shares = 2;
+ if (shares < MIN_SHARES)
+ shares = MIN_SHARES;
mutex_lock(&shares_mutex);
if (tg->shares == shares)
@@ -7733,6 +8784,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
unregister_fair_sched_group(tg, i);
+ list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for any ongoing reference to this group to finish */
@@ -7743,8 +8795,13 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
* w/o tripping rebalance_share or load_balance_fair.
*/
tg->shares = shares;
- for_each_possible_cpu(i)
- set_se_shares(tg->se[i], shares);
+ for_each_possible_cpu(i) {
+ /*
+ * force a rebalance
+ */
+ cfs_rq_set_shares(tg->cfs_rq[i], 0);
+ set_se_shares(tg->se[i], shares/nr_cpu_ids);
+ }
/*
* Enable load balance activity on this group, by inserting it back on
@@ -7753,6 +8810,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
register_fair_sched_group(tg, i);
+ list_add_rcu(&tg->siblings, &tg->parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
done:
mutex_unlock(&shares_mutex);
@@ -7779,26 +8837,58 @@ static unsigned long to_ratio(u64 period, u64 runtime)
return div64_64(runtime << 16, period);
}
+#ifdef CONFIG_CGROUP_SCHED
+static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
+{
+ struct task_group *tgi, *parent = tg->parent;
+ unsigned long total = 0;
+
+ if (!parent) {
+ if (global_rt_period() < period)
+ return 0;
+
+ return to_ratio(period, runtime) <
+ to_ratio(global_rt_period(), global_rt_runtime());
+ }
+
+ if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period)
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tgi, &parent->children, siblings) {
+ if (tgi == tg)
+ continue;
+
+ total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
+ tgi->rt_bandwidth.rt_runtime);
+ }
+ rcu_read_unlock();
+
+ return total + to_ratio(period, runtime) <
+ to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period),
+ parent->rt_bandwidth.rt_runtime);
+}
+#elif defined CONFIG_USER_SCHED
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
struct task_group *tgi;
unsigned long total = 0;
unsigned long global_ratio =
- to_ratio(sysctl_sched_rt_period,
- sysctl_sched_rt_runtime < 0 ?
- RUNTIME_INF : sysctl_sched_rt_runtime);
+ to_ratio(global_rt_period(), global_rt_runtime());
rcu_read_lock();
list_for_each_entry_rcu(tgi, &task_groups, list) {
if (tgi == tg)
continue;
- total += to_ratio(period, tgi->rt_runtime);
+ total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period),
+ tgi->rt_bandwidth.rt_runtime);
}
rcu_read_unlock();
return total + to_ratio(period, runtime) < global_ratio;
}
+#endif
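
Both __rt_schedulable() variants admit a group only if the summed bandwidth ratios stay below the parent's (or the global) ratio, where to_ratio() returns runtime << 16 divided by period, i.e. a 16.16 fixed-point fraction of the period that may be consumed. A self-contained sketch of that arithmetic using the usual defaults of a 1s period and 950ms global runtime (the kernel uses div64_64() for the division):

#include <stdio.h>

/* same 16.16 fixed-point ratio as to_ratio(): runtime << 16 / period */
static unsigned long long to_ratio(unsigned long long period,
				   unsigned long long runtime)
{
	return (runtime << 16) / period;
}

int main(void)
{
	/* usual defaults: 1s rt period, 950ms global rt runtime */
	unsigned long long global = to_ratio(1000000000ULL, 950000000ULL); /* 62259 */
	/* two sibling groups each asking for 500ms of a 1s period */
	unsigned long long child = to_ratio(1000000000ULL, 500000000ULL);  /* 32768 */

	printf("global=%llu child=%llu sum=%llu\n", global, child, 2 * child);
	/* 2 * 32768 = 65536 is not below 62259, so the second group is rejected */
	return 0;
}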
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
@@ -7811,19 +8901,14 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
return 0;
}
-int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
+static int tg_set_bandwidth(struct task_group *tg,
+ u64 rt_period, u64 rt_runtime)
{
- u64 rt_runtime, rt_period;
- int err = 0;
-
- rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
- rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
- if (rt_runtime_us == -1)
- rt_runtime = RUNTIME_INF;
+ int i, err = 0;
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
- if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+ if (rt_runtime == 0 && tg_has_rt_tasks(tg)) {
err = -EBUSY;
goto unlock;
}
@@ -7831,7 +8916,19 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
err = -EINVAL;
goto unlock;
}
- tg->rt_runtime = rt_runtime;
+
+ spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
+ tg->rt_bandwidth.rt_runtime = rt_runtime;
+
+ for_each_possible_cpu(i) {
+ struct rt_rq *rt_rq = tg->rt_rq[i];
+
+ spin_lock(&rt_rq->rt_runtime_lock);
+ rt_rq->rt_runtime = rt_runtime;
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ }
+ spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
@@ -7839,19 +8936,109 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
return err;
}
+int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
+{
+ u64 rt_runtime, rt_period;
+
+ rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
+ rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
+ if (rt_runtime_us < 0)
+ rt_runtime = RUNTIME_INF;
+
+ return tg_set_bandwidth(tg, rt_period, rt_runtime);
+}
+
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
- if (tg->rt_runtime == RUNTIME_INF)
+ if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
- rt_runtime_us = tg->rt_runtime;
+ rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
+
+int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
+{
+ u64 rt_runtime, rt_period;
+
+ rt_period = (u64)rt_period_us * NSEC_PER_USEC;
+ rt_runtime = tg->rt_bandwidth.rt_runtime;
+
+ return tg_set_bandwidth(tg, rt_period, rt_runtime);
+}
+
+long sched_group_rt_period(struct task_group *tg)
+{
+ u64 rt_period_us;
+
+ rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
+ do_div(rt_period_us, NSEC_PER_USEC);
+ return rt_period_us;
+}
+
+static int sched_rt_global_constraints(void)
+{
+ int ret = 0;
+
+ mutex_lock(&rt_constraints_mutex);
+ if (!__rt_schedulable(NULL, 1, 0))
+ ret = -EINVAL;
+ mutex_unlock(&rt_constraints_mutex);
+
+ return ret;
+}
+#else
+static int sched_rt_global_constraints(void)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+ for_each_possible_cpu(i) {
+ struct rt_rq *rt_rq = &cpu_rq(i)->rt;
+
+ spin_lock(&rt_rq->rt_runtime_lock);
+ rt_rq->rt_runtime = global_rt_runtime();
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ }
+ spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+
+ return 0;
+}
#endif
-#endif /* CONFIG_GROUP_SCHED */
+
+int sched_rt_handler(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ int old_period, old_runtime;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+ old_period = sysctl_sched_rt_period;
+ old_runtime = sysctl_sched_rt_runtime;
+
+ ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+
+ if (!ret && write) {
+ ret = sched_rt_global_constraints();
+ if (ret) {
+ sysctl_sched_rt_period = old_period;
+ sysctl_sched_rt_runtime = old_runtime;
+ } else {
+ def_rt_bandwidth.rt_runtime = global_rt_runtime();
+ def_rt_bandwidth.rt_period =
+ ns_to_ktime(global_rt_period());
+ }
+ }
+ mutex_unlock(&mutex);
+
+ return ret;
+}
#ifdef CONFIG_CGROUP_SCHED
@@ -7865,7 +9052,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
- struct task_group *tg;
+ struct task_group *tg, *parent;
if (!cgrp->parent) {
/* This is early initialization for the top cgroup */
@@ -7873,11 +9060,8 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
return &init_task_group.css;
}
- /* we support only 1-level deep hierarchical scheduler atm */
- if (cgrp->parent->parent)
- return ERR_PTR(-EINVAL);
-
- tg = sched_create_group();
+ parent = cgroup_tg(cgrp->parent);
+ tg = sched_create_group(parent);
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
@@ -7901,7 +9085,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
{
#ifdef CONFIG_RT_GROUP_SCHED
/* Don't accept realtime tasks when there is no way for them to run */
- if (rt_task(tsk) && cgroup_tg(cgrp)->rt_runtime == 0)
+ if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
return -EINVAL;
#else
/* We don't support RT-tasks being in separate groups */
@@ -7935,7 +9119,7 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
#endif
#ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
struct file *file,
const char __user *userbuf,
size_t nbytes, loff_t *unused_ppos)
@@ -7979,6 +9163,17 @@ static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft,
return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
+
+static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
+ u64 rt_period_us)
+{
+ return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+}
+
+static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+{
+ return sched_group_rt_period(cgroup_tg(cgrp));
+}
#endif
static struct cftype cpu_files[] = {
@@ -7995,6 +9190,11 @@ static struct cftype cpu_files[] = {
.read = cpu_rt_runtime_read,
.write = cpu_rt_runtime_write,
},
+ {
+ .name = "rt_period_us",
+ .read_uint = cpu_rt_period_read_uint,
+ .write_uint = cpu_rt_period_write_uint,
+ },
#endif
};
@@ -8035,9 +9235,9 @@ struct cpuacct {
struct cgroup_subsys cpuacct_subsys;
/* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
- return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+ return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
struct cpuacct, css);
}
@@ -8050,7 +9250,7 @@ static inline struct cpuacct *task_ca(struct task_struct *tsk)
/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
- struct cgroup_subsys *ss, struct cgroup *cont)
+ struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -8068,18 +9268,18 @@ static struct cgroup_subsys_state *cpuacct_create(
/* destroy an existing cpu accounting group */
static void
-cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
- struct cpuacct *ca = cgroup_ca(cont);
+ struct cpuacct *ca = cgroup_ca(cgrp);
free_percpu(ca->cpuusage);
kfree(ca);
}
/* return total cpu usage (in nanoseconds) of a group */
-static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
- struct cpuacct *ca = cgroup_ca(cont);
+ struct cpuacct *ca = cgroup_ca(cgrp);
u64 totalcpuusage = 0;
int i;
@@ -8098,16 +9298,40 @@ static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
return totalcpuusage;
}
+static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
+ u64 reset)
+{
+ struct cpuacct *ca = cgroup_ca(cgrp);
+ int err = 0;
+ int i;
+
+ if (reset) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_possible_cpu(i) {
+ u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+
+ spin_lock_irq(&cpu_rq(i)->lock);
+ *cpuusage = 0;
+ spin_unlock_irq(&cpu_rq(i)->lock);
+ }
+out:
+ return err;
+}
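
The new write handler only accepts zero, so resetting a group's accumulated usage is a write of "0" to cpuacct.usage; any other value returns -EINVAL. A small user-space sketch that resets the counter and reads it back, assuming the cpuacct controller is mounted at /cgroup/cpuacct (the mount point is illustrative):

#include <stdio.h>

int main(void)
{
	/* mount point is an assumption; depends on where cpuacct is mounted */
	const char *path = "/cgroup/cpuacct/cpuacct.usage";
	unsigned long long usage;
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror("open for reset");
		return 1;
	}
	fputs("0\n", f);		/* only "0" is accepted; anything else is -EINVAL */
	if (fclose(f) != 0)
		perror("reset");

	f = fopen(path, "r");
	if (f && fscanf(f, "%llu", &usage) == 1)
		printf("usage after reset: %llu ns\n", usage);
	if (f)
		fclose(f);
	return 0;
}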
+
static struct cftype files[] = {
{
.name = "usage",
.read_uint = cpuusage_read,
+ .write_uint = cpuusage_write,
},
};
-static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
- return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+ return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}
/*
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ef358ba07683..f3f4af4b8b0f 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -67,14 +67,24 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
(long long)(p->nvcsw + p->nivcsw),
p->prio);
#ifdef CONFIG_SCHEDSTATS
- SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
+ SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
SPLIT_NS(p->se.vruntime),
SPLIT_NS(p->se.sum_exec_runtime),
SPLIT_NS(p->se.sum_sleep_runtime));
#else
- SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n",
+ SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+ {
+ char path[64];
+
+ cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
+ SEQ_printf(m, " %s", path);
+ }
+#endif
+ SEQ_printf(m, "\n");
}
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
@@ -109,7 +119,21 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
struct sched_entity *last;
unsigned long flags;
- SEQ_printf(m, "\ncfs_rq\n");
+#if !defined(CONFIG_CGROUP_SCHED) || !defined(CONFIG_USER_SCHED)
+ SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
+#else
+ char path[128] = "";
+ struct cgroup *cgroup = NULL;
+ struct task_group *tg = cfs_rq->tg;
+
+ if (tg)
+ cgroup = tg->css.cgroup;
+
+ if (cgroup)
+ cgroup_path(cgroup, path, sizeof(path));
+
+ SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
+#endif
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
@@ -143,6 +167,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#endif
SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
cfs_rq->nr_spread_over);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_SMP
+ SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
+#endif
+#endif
}
static void print_cpu(struct seq_file *m, int cpu)
@@ -214,7 +243,6 @@ static int sched_debug_show(struct seq_file *m, void *v)
PN(sysctl_sched_latency);
PN(sysctl_sched_min_granularity);
PN(sysctl_sched_wakeup_granularity);
- PN(sysctl_sched_batch_wakeup_granularity);
PN(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#undef PN
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0080968d3e4a..89fa32b4edf2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -62,24 +62,14 @@ const_debug unsigned int sysctl_sched_child_runs_first = 1;
unsigned int __read_mostly sysctl_sched_compat_yield;
/*
- * SCHED_BATCH wake-up granularity.
- * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
- *
- * This option delays the preemption effects of decoupled workloads
- * and reduces their over-scheduling. Synchronous workloads will still
- * have immediate wakeup/sleep latencies.
- */
-unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
-
-/*
* SCHED_OTHER wake-up granularity.
- * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
@@ -87,6 +77,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
* CFS operations on generic schedulable entities:
*/
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+ return container_of(se, struct task_struct, se);
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* cpu runqueue to which this cfs_rq is attached */
@@ -98,6 +93,54 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
+/* Walk up scheduling entities hierarchy */
+#define for_each_sched_entity(se) \
+ for (; se; se = se->parent)
+
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+{
+ return p->se.cfs_rq;
+}
+
+/* runqueue on which this entity is (to be) queued */
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+ return se->cfs_rq;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+ return grp->my_q;
+}
+
+/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
+ * another cpu ('this_cpu')
+ */
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+ return cfs_rq->tg->cfs_rq[this_cpu];
+}
+
+/* Iterate through all leaf cfs_rq's on a runqueue */
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+ list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+
+/* Do the two (enqueued) entities belong to the same group? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+ if (se->cfs_rq == pse->cfs_rq)
+ return 1;
+
+ return 0;
+}
+
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return se->parent;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -107,13 +150,49 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
#define entity_is_task(se) 1
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+#define for_each_sched_entity(se) \
+ for (; se; se = NULL)
-static inline struct task_struct *task_of(struct sched_entity *se)
+static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
- return container_of(se, struct task_struct, se);
+ return &task_rq(p)->cfs;
+}
+
+static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+{
+ struct task_struct *p = task_of(se);
+ struct rq *rq = task_rq(p);
+
+ return &rq->cfs;
+}
+
+/* runqueue "owned" by this group */
+static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+{
+ return NULL;
+}
+
+static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
+{
+ return &cpu_rq(this_cpu)->cfs;
+}
+
+#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+ for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
+{
+ return 1;
+}
+
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return NULL;
}
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
/**************************************************************
* Scheduling class tree data structure manipulation methods:
@@ -255,6 +334,34 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
#endif
/*
+ * delta *= w / rw
+ */
+static inline unsigned long
+calc_delta_weight(unsigned long delta, struct sched_entity *se)
+{
+ for_each_sched_entity(se) {
+ delta = calc_delta_mine(delta,
+ se->load.weight, &cfs_rq_of(se)->load);
+ }
+
+ return delta;
+}
+
+/*
+ * delta *= rw / w
+ */
+static inline unsigned long
+calc_delta_fair(unsigned long delta, struct sched_entity *se)
+{
+ for_each_sched_entity(se) {
+ delta = calc_delta_mine(delta,
+ cfs_rq_of(se)->load.weight, &se->load);
+ }
+
+ return delta;
+}
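
calc_delta_weight() shrinks a delta by w/rw at every level of the hierarchy and calc_delta_fair() stretches it by rw/w; for a single-level runqueue this is just the weighted split of the period and its inverse. A sketch with the standard nice-0 and nice-5 weights (1024 and 335 from prio_to_weight[]); plain 64-bit division stands in for calc_delta_mine()'s inverse-weight multiply:

#include <stdio.h>

int main(void)
{
	unsigned long long period = 20000000ULL;	/* 20ms scheduling period, in ns */
	unsigned long long w0 = 1024, w5 = 335;		/* load weights for nice 0 and nice 5 */
	unsigned long long rw = w0 + w5;		/* total cfs_rq weight */

	/* calc_delta_weight(): delta *= w / rw, the task's wall-clock slice */
	unsigned long long slice0 = period * w0 / rw;	/* ~15.1 ms */
	unsigned long long slice5 = period * w5 / rw;	/* ~4.9 ms  */

	/* calc_delta_fair(): delta *= rw / w, exec time mapped back to vruntime;
	 * over its own slice each task advances by (roughly) the full period */
	printf("nice 0: slice=%llu vruntime=%llu\n", slice0, slice0 * rw / w0);
	printf("nice 5: slice=%llu vruntime=%llu\n", slice5, slice5 * rw / w5);
	return 0;
}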
+
+/*
* The idea is to set a period in which each task runs once.
*
* When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
@@ -283,29 +390,54 @@ static u64 __sched_period(unsigned long nr_running)
*/
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- return calc_delta_mine(__sched_period(cfs_rq->nr_running),
- se->load.weight, &cfs_rq->load);
+ return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
}
/*
- * We calculate the vruntime slice.
+ * We calculate the vruntime slice of a task about to be inserted
*
- * vs = s/w = p/rw
+ * vs = s*rw/w = p
*/
-static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- u64 vslice = __sched_period(nr_running);
+ unsigned long nr_running = cfs_rq->nr_running;
- vslice *= NICE_0_LOAD;
- do_div(vslice, rq_weight);
+ if (!se->on_rq)
+ nr_running++;
- return vslice;
+ return __sched_period(nr_running);
}
-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+/*
+ * The goal of calc_delta_asym() is to be asymmetric around NICE_0_LOAD, in
+ * that it favours nice levels >= 0 over nice levels < 0.
+ *
+ * -20 |
+ * |
+ * 0 --------+-------
+ * .'
+ * 19 .'
+ *
+ */
+static unsigned long
+calc_delta_asym(unsigned long delta, struct sched_entity *se)
{
- return __sched_vslice(cfs_rq->load.weight + se->load.weight,
- cfs_rq->nr_running + 1);
+ struct load_weight lw = {
+ .weight = NICE_0_LOAD,
+ .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
+ };
+
+ for_each_sched_entity(se) {
+ struct load_weight *se_lw = &se->load;
+
+ if (se->load.weight < NICE_0_LOAD)
+ se_lw = &lw;
+
+ delta = calc_delta_mine(delta,
+ cfs_rq_of(se)->load.weight, se_lw);
+ }
+
+ return delta;
}
/*
@@ -322,11 +454,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
- delta_exec_weighted = delta_exec;
- if (unlikely(curr->load.weight != NICE_0_LOAD)) {
- delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
- &curr->load);
- }
+ delta_exec_weighted = calc_delta_fair(delta_exec, curr);
curr->vruntime += delta_exec_weighted;
}
@@ -413,20 +541,43 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
* Scheduling class queueing methods:
*/
+#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+static void
+add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
+{
+ cfs_rq->task_weight += weight;
+}
+#else
+static inline void
+add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
+{
+}
+#endif
+
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
+ if (!parent_entity(se))
+ inc_cpu_load(rq_of(cfs_rq), se->load.weight);
+ if (entity_is_task(se))
+ add_cfs_task_weight(cfs_rq, se->load.weight);
cfs_rq->nr_running++;
se->on_rq = 1;
+ list_add(&se->group_node, &cfs_rq->tasks);
}
static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
+ if (!parent_entity(se))
+ dec_cpu_load(rq_of(cfs_rq), se->load.weight);
+ if (entity_is_task(se))
+ add_cfs_task_weight(cfs_rq, -se->load.weight);
cfs_rq->nr_running--;
se->on_rq = 0;
+ list_del_init(&se->group_node);
}
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -510,8 +661,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) {
 /* sleeps up to a single latency don't count. */
- if (sched_feat(NEW_FAIR_SLEEPERS))
- vruntime -= sysctl_sched_latency;
+ if (sched_feat(NEW_FAIR_SLEEPERS)) {
+ if (sched_feat(NORMALIZED_SLEEPER))
+ vruntime -= calc_delta_weight(sysctl_sched_latency, se);
+ else
+ vruntime -= sysctl_sched_latency;
+ }
/* ensure we never gain time by being placed backwards. */
vruntime = max_vruntime(se->vruntime, vruntime);
@@ -627,20 +782,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
static struct sched_entity *
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- s64 diff, gran;
-
if (!cfs_rq->next)
return se;
- diff = cfs_rq->next->vruntime - se->vruntime;
- if (diff < 0)
- return se;
-
- gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
- if (diff > gran)
+ if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
return se;
return cfs_rq->next;
@@ -708,101 +859,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
* CFS operations on tasks:
*/
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/* Walk up scheduling entities hierarchy */
-#define for_each_sched_entity(se) \
- for (; se; se = se->parent)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
- return p->se.cfs_rq;
-}
-
-/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
- return se->cfs_rq;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
- return grp->my_q;
-}
-
-/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
- * another cpu ('this_cpu')
- */
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
- return cfs_rq->tg->cfs_rq[this_cpu];
-}
-
-/* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
- list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
-
-/* Do the two (enqueued) entities belong to the same group ? */
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
- if (se->cfs_rq == pse->cfs_rq)
- return 1;
-
- return 0;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
- return se->parent;
-}
-
-#else /* CONFIG_FAIR_GROUP_SCHED */
-
-#define for_each_sched_entity(se) \
- for (; se; se = NULL)
-
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
-{
- return &task_rq(p)->cfs;
-}
-
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
-{
- struct task_struct *p = task_of(se);
- struct rq *rq = task_rq(p);
-
- return &rq->cfs;
-}
-
-/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
-{
- return NULL;
-}
-
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
- return &cpu_rq(this_cpu)->cfs;
-}
-
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
- for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
- return 1;
-}
-
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
-{
- return NULL;
-}
-
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
@@ -916,7 +972,7 @@ static void yield_task_fair(struct rq *rq)
/*
* Already in the rightmost position?
*/
- if (unlikely(rightmost->vruntime < se->vruntime))
+ if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
return;
/*
@@ -955,7 +1011,9 @@ static int wake_idle(int cpu, struct task_struct *p)
return cpu;
for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_IDLE) {
+ if ((sd->flags & SD_WAKE_IDLE)
+ || ((sd->flags & SD_WAKE_IDLE_FAR)
+ && !task_hot(p, task_rq(p)->clock, sd))) {
cpus_and(tmp, sd->span, p->cpus_allowed);
for_each_cpu_mask(i, tmp) {
if (idle_cpu(i)) {
@@ -1099,6 +1157,58 @@ out:
}
#endif /* CONFIG_SMP */
+static unsigned long wakeup_gran(struct sched_entity *se)
+{
+ unsigned long gran = sysctl_sched_wakeup_granularity;
+
+ /*
+ * More easily preempt - nice tasks, while not making it harder for
+ * + nice tasks.
+ */
+ gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+
+ return gran;
+}
+
+/*
+ * Should 'se' preempt 'curr'.
+ *
+ * |s1
+ * |s2
+ * |s3
+ * g
+ * |<--->|c
+ *
+ * w(c, s1) = -1
+ * w(c, s2) = 0
+ * w(c, s3) = 1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+ s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+ if (vdiff < 0)
+ return -1;
+
+ gran = wakeup_gran(curr);
+ if (vdiff > gran)
+ return 1;
+
+ return 0;
+}
+
+/* return depth at which a sched entity is present in the hierarchy */
+static inline int depth_se(struct sched_entity *se)
+{
+ int depth = 0;
+
+ for_each_sched_entity(se)
+ depth++;
+
+ return depth;
+}
/*
* Preempt the current task with a newly woken task if needed:
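For reference, here is a minimal userspace sketch of the three-way decision the new wakeup_preempt_entity() encodes. The granularity is a fixed constant rather than the weight-scaled value wakeup_gran() computes, and the vruntime numbers are invented to match the s1/s2/s3 diagram above.

#include <stdio.h>

typedef long long s64;

/* -1: candidate is behind current (no preemption),
 *  0: candidate leads current, but by less than the granularity,
 *  1: candidate leads by more than the granularity -> preempt. */
static int preempt_decision(s64 curr_vruntime, s64 cand_vruntime, s64 gran)
{
	s64 vdiff = curr_vruntime - cand_vruntime;

	if (vdiff < 0)
		return -1;
	if (vdiff > gran)
		return 1;
	return 0;
}

int main(void)
{
	const s64 gran = 5;

	printf("s1: %d\n", preempt_decision(10, 12, gran)); /* -1 */
	printf("s2: %d\n", preempt_decision(10,  8, gran)); /*  0 */
	printf("s3: %d\n", preempt_decision(10,  1, gran)); /*  1 */
	return 0;
}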
@@ -1108,7 +1218,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se, *pse = &p->se;
- unsigned long gran;
+ int se_depth, pse_depth;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
@@ -1133,20 +1243,33 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
if (!sched_feat(WAKEUP_PREEMPT))
return;
- while (!is_same_group(se, pse)) {
+ /*
+ * The preemption test can only be made between sibling entities that
+ * are in the same cfs_rq, i.e. that have a common parent. Walk up
+ * the hierarchy of both tasks until we find ancestors that are
+ * siblings under a common parent.
+ */
+
+ /* First walk up until both entities are at same depth */
+ se_depth = depth_se(se);
+ pse_depth = depth_se(pse);
+
+ while (se_depth > pse_depth) {
+ se_depth--;
se = parent_entity(se);
+ }
+
+ while (pse_depth > se_depth) {
+ pse_depth--;
pse = parent_entity(pse);
}
- gran = sysctl_sched_wakeup_granularity;
- /*
- * More easily preempt - nice tasks, while not making
- * it harder for + nice tasks.
- */
- if (unlikely(se->load.weight > NICE_0_LOAD))
- gran = calc_delta_fair(gran, &se->load);
+ while (!is_same_group(se, pse)) {
+ se = parent_entity(se);
+ pse = parent_entity(pse);
+ }
- if (pse->vruntime + gran < se->vruntime)
+ if (wakeup_preempt_entity(se, pse) == 1)
resched_task(curr);
}
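The walk above is the usual "equalize depths, then climb together" way of bringing two entities to a level where they share a parent. A self-contained sketch of the same idea on a toy tree follows; the node names and the find_common_level() helper are invented for illustration and are not kernel code.

#include <stdio.h>
#include <stddef.h>

struct node {
	const char *name;
	struct node *parent;	/* NULL for the root */
};

static int depth(const struct node *n)
{
	int d = 0;

	for (; n; n = n->parent)
		d++;
	return d;
}

/* Walk both nodes up until they are siblings under the same parent,
 * mirroring the structure of the check_preempt_wakeup() loops. */
static void find_common_level(struct node **a, struct node **b)
{
	int da = depth(*a), db = depth(*b);

	while (da > db) { *a = (*a)->parent; da--; }
	while (db > da) { *b = (*b)->parent; db--; }
	while ((*a)->parent != (*b)->parent) {
		*a = (*a)->parent;
		*b = (*b)->parent;
	}
}

int main(void)
{
	struct node root = { "root", NULL };
	struct node g1 = { "g1", &root }, g2 = { "g2", &root };
	struct node t1 = { "t1", &g1 }, t2 = { "t2", &g2 };
	struct node *a = &t1, *b = &t2;

	find_common_level(&a, &b);
	printf("%s vs %s\n", a->name, b->name);	/* prints "g1 vs g2" */
	return 0;
}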
@@ -1197,15 +1320,27 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
* the current task:
*/
static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
- struct task_struct *p;
+ struct task_struct *p = NULL;
+ struct sched_entity *se;
+
+ if (next == &cfs_rq->tasks)
+ return NULL;
+
+ /* Skip over entities that are not tasks */
+ do {
+ se = list_entry(next, struct sched_entity, group_node);
+ next = next->next;
+ } while (next != &cfs_rq->tasks && !entity_is_task(se));
- if (!curr)
+ if (next == &cfs_rq->tasks)
return NULL;
- p = rb_entry(curr, struct task_struct, se.run_node);
- cfs_rq->rb_load_balance_curr = rb_next(curr);
+ cfs_rq->balance_iterator = next;
+
+ if (entity_is_task(se))
+ p = task_of(se);
return p;
}
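The reworked iterator walks the plain cfs_rq->tasks list and skips entries that are not tasks, remembering where it stopped. Below is a userspace sketch of that skip-and-remember pattern; the entity layout, is_task flag and next_task() helper are simplifications, not the kernel's list_entry()/entity_is_task() machinery.

#include <stdio.h>
#include <stddef.h>

struct entity {
	const char *name;
	int is_task;		/* 0 for group entities */
	struct entity *next;	/* NULL terminates the list */
};

/* Return the next task-entity at or after *pos and advance *pos past it,
 * or NULL when the list is exhausted -- the shape of the new
 * __load_balance_iterator(). */
static struct entity *next_task(struct entity **pos)
{
	struct entity *e = *pos;

	while (e && !e->is_task)
		e = e->next;
	if (!e)
		return NULL;
	*pos = e->next;		/* remembered like cfs_rq->balance_iterator */
	return e;
}

int main(void)
{
	struct entity t2 = { "t2", 1, NULL };
	struct entity g1 = { "g1", 0, &t2 };
	struct entity t1 = { "t1", 1, &g1 };
	struct entity *pos = &t1, *e;

	while ((e = next_task(&pos)))
		printf("%s\n", e->name);	/* prints t1 then t2 */
	return 0;
}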
@@ -1214,85 +1349,100 @@ static struct task_struct *load_balance_start_fair(void *arg)
{
struct cfs_rq *cfs_rq = arg;
- return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
+ return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}
static struct task_struct *load_balance_next_fair(void *arg)
{
struct cfs_rq *cfs_rq = arg;
- return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
+ return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
+static unsigned long
+__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ unsigned long max_load_move, struct sched_domain *sd,
+ enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
+ struct cfs_rq *cfs_rq)
{
- struct sched_entity *curr;
- struct task_struct *p;
-
- if (!cfs_rq->nr_running || !first_fair(cfs_rq))
- return MAX_PRIO;
-
- curr = cfs_rq->curr;
- if (!curr)
- curr = __pick_next_entity(cfs_rq);
+ struct rq_iterator cfs_rq_iterator;
- p = task_of(curr);
+ cfs_rq_iterator.start = load_balance_start_fair;
+ cfs_rq_iterator.next = load_balance_next_fair;
+ cfs_rq_iterator.arg = cfs_rq;
- return p->prio;
+ return balance_tasks(this_rq, this_cpu, busiest,
+ max_load_move, sd, idle, all_pinned,
+ this_best_prio, &cfs_rq_iterator);
}
-#endif
+#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
- struct cfs_rq *busy_cfs_rq;
long rem_load_move = max_load_move;
- struct rq_iterator cfs_rq_iterator;
-
- cfs_rq_iterator.start = load_balance_start_fair;
- cfs_rq_iterator.next = load_balance_next_fair;
+ int busiest_cpu = cpu_of(busiest);
+ struct task_group *tg;
- for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
-#ifdef CONFIG_FAIR_GROUP_SCHED
- struct cfs_rq *this_cfs_rq;
+ rcu_read_lock();
+ list_for_each_entry(tg, &task_groups, list) {
long imbalance;
- unsigned long maxload;
+ unsigned long this_weight, busiest_weight;
+ long rem_load, max_load, moved_load;
+
+ /*
+ * empty group
+ */
+ if (!aggregate(tg, sd)->task_weight)
+ continue;
+
+ rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
+ rem_load /= aggregate(tg, sd)->load + 1;
+
+ this_weight = tg->cfs_rq[this_cpu]->task_weight;
+ busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
+
+ imbalance = (busiest_weight - this_weight) / 2;
- this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
+ if (imbalance < 0)
+ imbalance = busiest_weight;
- imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
- /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
- if (imbalance <= 0)
+ max_load = max(rem_load, imbalance);
+ moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
+ max_load, sd, idle, all_pinned, this_best_prio,
+ tg->cfs_rq[busiest_cpu]);
+
+ if (!moved_load)
continue;
- /* Don't pull more than imbalance/2 */
- imbalance /= 2;
- maxload = min(rem_load_move, imbalance);
+ move_group_shares(tg, sd, busiest_cpu, this_cpu);
- *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
-#else
-# define maxload rem_load_move
-#endif
- /*
- * pass busy_cfs_rq argument into
- * load_balance_[start|next]_fair iterators
- */
- cfs_rq_iterator.arg = busy_cfs_rq;
- rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
- maxload, sd, idle, all_pinned,
- this_best_prio,
- &cfs_rq_iterator);
+ moved_load *= aggregate(tg, sd)->load;
+ moved_load /= aggregate(tg, sd)->rq_weight + 1;
- if (rem_load_move <= 0)
+ rem_load_move -= moved_load;
+ if (rem_load_move < 0)
break;
}
+ rcu_read_unlock();
return max_load_move - rem_load_move;
}
+#else
+static unsigned long
+load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, int *this_best_prio)
+{
+ return __load_balance_fair(this_rq, this_cpu, busiest,
+ max_load_move, sd, idle, all_pinned,
+ this_best_prio, &busiest->cfs);
+}
+#endif
static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
@@ -1461,16 +1611,40 @@ static const struct sched_class fair_sched_class = {
};
#ifdef CONFIG_SCHED_DEBUG
+static void
+print_cfs_rq_tasks(struct seq_file *m, struct cfs_rq *cfs_rq, int depth)
+{
+ struct sched_entity *se;
+
+ if (!cfs_rq)
+ return;
+
+ list_for_each_entry_rcu(se, &cfs_rq->tasks, group_node) {
+ int i;
+
+ for (i = depth; i; i--)
+ seq_puts(m, " ");
+
+ seq_printf(m, "%lu %s %lu\n",
+ se->load.weight,
+ entity_is_task(se) ? "T" : "G",
+ calc_delta_weight(SCHED_LOAD_SCALE, se)
+ );
+ if (!entity_is_task(se))
+ print_cfs_rq_tasks(m, group_cfs_rq(se), depth + 1);
+ }
+}
+
static void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq;
-#ifdef CONFIG_FAIR_GROUP_SCHED
- print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
-#endif
rcu_read_lock();
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
print_cfs_rq(m, cpu, cfs_rq);
+
+ seq_printf(m, "\nWeight tree:\n");
+ print_cfs_rq_tasks(m, &cpu_rq(cpu)->cfs, 1);
rcu_read_unlock();
}
#endif
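The new print_cfs_rq_tasks() above walks each group's queue recursively and indents by depth. Here is a minimal userspace version of that traversal pattern; the struct ent layout, names and weights are invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct ent {
	const char *name;
	unsigned long weight;
	struct ent *children;	/* non-NULL means this is a group */
	int nr_children;
};

static void print_tree(const struct ent *e, int n, int depth)
{
	int i, j;

	for (i = 0; i < n; i++) {
		for (j = 0; j < depth; j++)
			printf("  ");
		printf("%lu %s %s\n", e[i].weight,
		       e[i].children ? "G" : "T", e[i].name);
		if (e[i].children)
			print_tree(e[i].children, e[i].nr_children, depth + 1);
	}
}

int main(void)
{
	struct ent grp_kids[] = { { "task-b", 512, NULL, 0 },
				  { "task-c", 512, NULL, 0 } };
	struct ent top[] = { { "task-a", 1024, NULL, 0 },
			     { "group-1", 2048, grp_kids, 2 } };

	print_tree(top, 2, 1);	/* start at depth 1, like print_cfs_stats() */
	return 0;
}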
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
new file mode 100644
index 000000000000..1c7283cb9581
--- /dev/null
+++ b/kernel/sched_features.h
@@ -0,0 +1,10 @@
+SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
+SCHED_FEAT(WAKEUP_PREEMPT, 1)
+SCHED_FEAT(START_DEBIT, 1)
+SCHED_FEAT(AFFINE_WAKEUPS, 1)
+SCHED_FEAT(CACHE_HOT_BUDDY, 1)
+SCHED_FEAT(SYNC_WAKEUPS, 1)
+SCHED_FEAT(HRTICK, 1)
+SCHED_FEAT(DOUBLE_TICK, 0)
+SCHED_FEAT(NORMALIZED_SLEEPER, 1)
+SCHED_FEAT(DEADLINE, 1)
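sched.c consumes this header as an X-macro: the same SCHED_FEAT() list is expanded once into an enum of bit positions and once into the default feature bitmask. A standalone sketch of the pattern, with the list inlined as a macro (FEAT and FEATURE_LIST are illustrative names) so it compiles on its own:

#include <stdio.h>

/* The feature list as it would live in a header. */
#define FEATURE_LIST \
	FEAT(NEW_FAIR_SLEEPERS, 1) \
	FEAT(WAKEUP_PREEMPT, 1) \
	FEAT(DOUBLE_TICK, 0)

/* First expansion: an enum naming each feature's bit position. */
#define FEAT(name, enabled) __FEAT_##name,
enum { FEATURE_LIST __FEAT_NR };
#undef FEAT

/* Second expansion: OR together the bits of features enabled by default. */
#define FEAT(name, enabled) ((enabled) << __FEAT_##name) |
static const unsigned int default_features = FEATURE_LIST 0;
#undef FEAT

int main(void)
{
	printf("default features: %#x, DOUBLE_TICK enabled: %d\n",
	       default_features,
	       !!(default_features & (1u << __FEAT_DOUBLE_TICK)));
	return 0;
}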
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0a6d2e516420..c2730a5a4f05 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -62,7 +62,12 @@ static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
if (!rt_rq->tg)
return RUNTIME_INF;
- return rt_rq->tg->rt_runtime;
+ return rt_rq->rt_runtime;
+}
+
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+ return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -127,14 +132,39 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
return p->prio != p->normal_prio;
}
+#ifdef CONFIG_SMP
+static inline cpumask_t sched_rt_period_mask(void)
+{
+ return cpu_rq(smp_processor_id())->rd->span;
+}
+#else
+static inline cpumask_t sched_rt_period_mask(void)
+{
+ return cpu_online_map;
+}
+#endif
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+ return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+ return &rt_rq->tg->rt_bandwidth;
+}
+
#else
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
- if (sysctl_sched_rt_runtime == -1)
- return RUNTIME_INF;
+ return rt_rq->rt_runtime;
+}
- return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+ return ktime_to_ns(def_rt_bandwidth.rt_period);
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -173,6 +203,102 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled;
}
+
+static inline cpumask_t sched_rt_period_mask(void)
+{
+ return cpu_online_map;
+}
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+ return &cpu_rq(cpu)->rt;
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+ return &def_rt_bandwidth;
+}
+
+#endif
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+ int i, idle = 1;
+ cpumask_t span;
+
+ if (rt_b->rt_runtime == RUNTIME_INF)
+ return 1;
+
+ span = sched_rt_period_mask();
+ for_each_cpu_mask(i, span) {
+ int enqueue = 0;
+ struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
+ spin_lock(&rq->lock);
+ if (rt_rq->rt_time) {
+ u64 runtime;
+
+ spin_lock(&rt_rq->rt_runtime_lock);
+ runtime = rt_rq->rt_runtime;
+ rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+ if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+ rt_rq->rt_throttled = 0;
+ enqueue = 1;
+ }
+ if (rt_rq->rt_time || rt_rq->rt_nr_running)
+ idle = 0;
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ }
+
+ if (enqueue)
+ sched_rt_rq_enqueue(rt_rq);
+ spin_unlock(&rq->lock);
+ }
+
+ return idle;
+}
+
+#ifdef CONFIG_SMP
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+ struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+ int i, weight, more = 0;
+ u64 rt_period;
+
+ weight = cpus_weight(rd->span);
+
+ spin_lock(&rt_b->rt_runtime_lock);
+ rt_period = ktime_to_ns(rt_b->rt_period);
+ for_each_cpu_mask(i, rd->span) {
+ struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+ s64 diff;
+
+ if (iter == rt_rq)
+ continue;
+
+ spin_lock(&iter->rt_runtime_lock);
+ diff = iter->rt_runtime - iter->rt_time;
+ if (diff > 0) {
+ do_div(diff, weight);
+ if (rt_rq->rt_runtime + diff > rt_period)
+ diff = rt_period - rt_rq->rt_runtime;
+ iter->rt_runtime -= diff;
+ rt_rq->rt_runtime += diff;
+ more = 1;
+ if (rt_rq->rt_runtime == rt_period) {
+ spin_unlock(&iter->rt_runtime_lock);
+ break;
+ }
+ }
+ spin_unlock(&iter->rt_runtime_lock);
+ }
+ spin_unlock(&rt_b->rt_runtime_lock);
+
+ return more;
+}
#endif
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
@@ -197,12 +323,24 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
if (rt_rq->rt_throttled)
return rt_rq_throttled(rt_rq);
+ if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
+ return 0;
+
+#ifdef CONFIG_SMP
if (rt_rq->rt_time > runtime) {
- struct rq *rq = rq_of_rt_rq(rt_rq);
+ int more;
- rq->rt_throttled = 1;
- rt_rq->rt_throttled = 1;
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ more = balance_runtime(rt_rq);
+ spin_lock(&rt_rq->rt_runtime_lock);
+ if (more)
+ runtime = sched_rt_runtime(rt_rq);
+ }
+#endif
+
+ if (rt_rq->rt_time > runtime) {
+ rt_rq->rt_throttled = 1;
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
@@ -212,29 +350,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
return 0;
}
-static void update_sched_rt_period(struct rq *rq)
-{
- struct rt_rq *rt_rq;
- u64 period;
-
- while (rq->clock > rq->rt_period_expire) {
- period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
- rq->rt_period_expire += period;
-
- for_each_leaf_rt_rq(rt_rq, rq) {
- u64 runtime = sched_rt_runtime(rt_rq);
-
- rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
- if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
- rt_rq->rt_throttled = 0;
- sched_rt_rq_enqueue(rt_rq);
- }
- }
-
- rq->rt_throttled = 0;
- }
-}
-
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -259,9 +374,15 @@ static void update_curr_rt(struct rq *rq)
curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec);
- rt_rq->rt_time += delta_exec;
- if (sched_rt_runtime_exceeded(rt_rq))
- resched_task(curr);
+ for_each_sched_rt_entity(rt_se) {
+ rt_rq = rt_rq_of_se(rt_se);
+
+ spin_lock(&rt_rq->rt_runtime_lock);
+ rt_rq->rt_time += delta_exec;
+ if (sched_rt_runtime_exceeded(rt_rq))
+ resched_task(curr);
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ }
}
static inline
@@ -284,6 +405,11 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
#ifdef CONFIG_RT_GROUP_SCHED
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted++;
+
+ if (rt_rq->tg)
+ start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+#else
+ start_rt_bandwidth(&def_rt_bandwidth);
#endif
}
@@ -353,27 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
/*
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
- * doesn't matter much for now, as h=2 for GROUP_SCHED.
*/
static void dequeue_rt_stack(struct task_struct *p)
{
- struct sched_rt_entity *rt_se, *top_se;
+ struct sched_rt_entity *rt_se, *back = NULL;
- /*
- * dequeue all, top - down.
- */
- do {
- rt_se = &p->rt;
- top_se = NULL;
- for_each_sched_rt_entity(rt_se) {
- if (on_rt_rq(rt_se))
- top_se = rt_se;
- }
- if (top_se)
- dequeue_rt_entity(top_se);
- } while (top_se);
+ rt_se = &p->rt;
+ for_each_sched_rt_entity(rt_se) {
+ rt_se->back = back;
+ back = rt_se;
+ }
+
+ for (rt_se = back; rt_se; rt_se = rt_se->back) {
+ if (on_rt_rq(rt_se))
+ dequeue_rt_entity(rt_se);
+ }
}
/*
@@ -393,6 +513,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
*/
for_each_sched_rt_entity(rt_se)
enqueue_rt_entity(rt_se);
+
+ inc_cpu_load(rq, p->se.load.weight);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -412,6 +534,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
if (rt_rq && rt_rq->rt_nr_running)
enqueue_rt_entity(rt_se);
}
+
+ dec_cpu_load(rq, p->se.load.weight);
}
/*
@@ -1001,7 +1125,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+ const cpumask_t *new_mask)
{
int weight = cpus_weight(*new_mask);
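A toy model of the redistribution done by the new balance_runtime(): each other runqueue's spare runtime (its allotment minus what it has already consumed) is divided by the number of CPUs in the domain and lent to the starved runqueue, capped at the period. The CPU count and numbers below are invented; the real accounting is in nanoseconds under the rt_runtime locks.

#include <stdio.h>

#define NCPUS 4

int main(void)
{
	/* per-cpu runtime allotment and consumed time, arbitrary units */
	long long runtime[NCPUS] = { 100, 100, 100, 100 };
	long long rt_time[NCPUS] = { 100,  20,  50,  90 };
	const long long period = 200;
	int starved = 0, i;

	/* cpu 0 ran out: borrow a 1/NCPUS share of everyone's spare time */
	for (i = 0; i < NCPUS; i++) {
		long long diff;

		if (i == starved)
			continue;
		diff = runtime[i] - rt_time[i];
		if (diff <= 0)
			continue;
		diff /= NCPUS;
		if (runtime[starved] + diff > period)
			diff = period - runtime[starved];
		runtime[i] -= diff;
		runtime[starved] += diff;
	}
	printf("cpu0 runtime after borrowing: %lld\n", runtime[starved]);
	return 0;
}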
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 5b32433e7ee5..5bae2e0c3ff2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -9,6 +9,11 @@
static int show_schedstat(struct seq_file *seq, void *v)
{
int cpu;
+ int mask_len = NR_CPUS/32 * 9;
+ char *mask_str = kmalloc(mask_len, GFP_KERNEL);
+
+ if (mask_str == NULL)
+ return -ENOMEM;
seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
seq_printf(seq, "timestamp %lu\n", jiffies);
@@ -36,9 +41,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
preempt_disable();
for_each_domain(cpu, sd) {
enum cpu_idle_type itype;
- char mask_str[NR_CPUS];
- cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
+ cpumask_scnprintf(mask_str, mask_len, sd->span);
seq_printf(seq, "domain%d %s", dcount++, mask_str);
for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
itype++) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 31e9f2a47928..3c44956ee7e2 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -356,7 +356,8 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
/* Tasklets */
struct tasklet_head
{
- struct tasklet_struct *list;
+ struct tasklet_struct *head;
+ struct tasklet_struct **tail;
};
/* Some compilers disobey section attribute on statics when not
@@ -369,8 +370,9 @@ void __tasklet_schedule(struct tasklet_struct *t)
unsigned long flags;
local_irq_save(flags);
- t->next = __get_cpu_var(tasklet_vec).list;
- __get_cpu_var(tasklet_vec).list = t;
+ t->next = NULL;
+ *__get_cpu_var(tasklet_vec).tail = t;
+ __get_cpu_var(tasklet_vec).tail = &(t->next);
raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
@@ -382,8 +384,9 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
unsigned long flags;
local_irq_save(flags);
- t->next = __get_cpu_var(tasklet_hi_vec).list;
- __get_cpu_var(tasklet_hi_vec).list = t;
+ t->next = NULL;
+ *__get_cpu_var(tasklet_hi_vec).tail = t;
+ __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags);
}
@@ -395,8 +398,9 @@ static void tasklet_action(struct softirq_action *a)
struct tasklet_struct *list;
local_irq_disable();
- list = __get_cpu_var(tasklet_vec).list;
- __get_cpu_var(tasklet_vec).list = NULL;
+ list = __get_cpu_var(tasklet_vec).head;
+ __get_cpu_var(tasklet_vec).head = NULL;
+ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
local_irq_enable();
while (list) {
@@ -416,8 +420,9 @@ static void tasklet_action(struct softirq_action *a)
}
local_irq_disable();
- t->next = __get_cpu_var(tasklet_vec).list;
- __get_cpu_var(tasklet_vec).list = t;
+ t->next = NULL;
+ *__get_cpu_var(tasklet_vec).tail = t;
+ __get_cpu_var(tasklet_vec).tail = &(t->next);
__raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_enable();
}
@@ -428,8 +433,9 @@ static void tasklet_hi_action(struct softirq_action *a)
struct tasklet_struct *list;
local_irq_disable();
- list = __get_cpu_var(tasklet_hi_vec).list;
- __get_cpu_var(tasklet_hi_vec).list = NULL;
+ list = __get_cpu_var(tasklet_hi_vec).head;
+ __get_cpu_var(tasklet_hi_vec).head = NULL;
+ __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
local_irq_enable();
while (list) {
@@ -449,8 +455,9 @@ static void tasklet_hi_action(struct softirq_action *a)
}
local_irq_disable();
- t->next = __get_cpu_var(tasklet_hi_vec).list;
- __get_cpu_var(tasklet_hi_vec).list = t;
+ t->next = NULL;
+ *__get_cpu_var(tasklet_hi_vec).tail = t;
+ __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
__raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
}
@@ -487,6 +494,15 @@ EXPORT_SYMBOL(tasklet_kill);
void __init softirq_init(void)
{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu(tasklet_vec, cpu).tail =
+ &per_cpu(tasklet_vec, cpu).head;
+ per_cpu(tasklet_hi_vec, cpu).tail =
+ &per_cpu(tasklet_hi_vec, cpu).head;
+ }
+
open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
@@ -555,9 +571,12 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
return;
/* CPU is dead, so no lock needed. */
- for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
+ for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
if (*i == t) {
*i = t->next;
+ /* If this was the tail element, move the tail ptr */
+ if (*i == NULL)
+ per_cpu(tasklet_vec, cpu).tail = i;
return;
}
}
@@ -566,20 +585,20 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
static void takeover_tasklets(unsigned int cpu)
{
- struct tasklet_struct **i;
-
/* CPU is dead, so no lock needed. */
local_irq_disable();
/* Find end, append list for that CPU. */
- for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
- *i = per_cpu(tasklet_vec, cpu).list;
- per_cpu(tasklet_vec, cpu).list = NULL;
+ *__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
+ __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+ per_cpu(tasklet_vec, cpu).head = NULL;
+ per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
raise_softirq_irqoff(TASKLET_SOFTIRQ);
- for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
- *i = per_cpu(tasklet_hi_vec, cpu).list;
- per_cpu(tasklet_hi_vec, cpu).list = NULL;
+ *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
+ __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+ per_cpu(tasklet_hi_vec, cpu).head = NULL;
+ per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_enable();
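The struct tasklet_head change replaces "walk to the end before appending" with a head pointer plus a pointer to the last next field, so both appending one tasklet and splicing a dead CPU's whole list become O(1). A standalone sketch of that representation follows; the queue/item names are invented and there is no per-cpu data or interrupt masking here.

#include <stdio.h>
#include <stddef.h>

struct item {
	int val;
	struct item *next;
};

struct queue {
	struct item *head;
	struct item **tail;	/* points at head, or at the last item's next */
};

static void queue_init(struct queue *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

/* O(1) append, in the spirit of __tasklet_schedule() */
static void queue_add(struct queue *q, struct item *it)
{
	it->next = NULL;
	*q->tail = it;
	q->tail = &it->next;
}

/* O(1) splice of src onto dst, in the spirit of takeover_tasklets() */
static void queue_splice(struct queue *dst, struct queue *src)
{
	*dst->tail = src->head;
	if (src->head)
		dst->tail = src->tail;
	queue_init(src);
}

int main(void)
{
	struct queue a, b;
	struct item i1 = { 1, NULL }, i2 = { 2, NULL }, i3 = { 3, NULL };
	struct item *it;

	queue_init(&a);
	queue_init(&b);
	queue_add(&a, &i1);
	queue_add(&b, &i2);
	queue_add(&b, &i3);
	queue_splice(&a, &b);
	for (it = a.head; it; it = it->next)
		printf("%d\n", it->val);	/* prints 1 2 3 */
	return 0;
}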
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 6f4e0e13f70c..e1b2a5b1b105 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -35,7 +35,7 @@ static int stopmachine(void *cpu)
int irqs_disabled = 0;
int prepared = 0;
- set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
+ set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
/* Ack: we are alive */
smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b2a2d6889bab..fd3364827ccf 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -270,17 +270,6 @@ static struct ctl_table kern_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_batch_wakeup_granularity_ns",
- .data = &sysctl_sched_batch_wakeup_granularity,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &min_wakeup_granularity_ns,
- .extra2 = &max_wakeup_granularity_ns,
- },
- {
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
.maxlen = sizeof(unsigned int),
@@ -318,7 +307,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_rt_period,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &sched_rt_handler,
},
{
.ctl_name = CTL_UNNUMBERED,
@@ -326,7 +315,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_rt_runtime,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &sched_rt_handler,
},
{
.ctl_name = CTL_UNNUMBERED,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 69dba0c71727..d358d4e3a958 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -191,7 +191,6 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
void tick_nohz_stop_sched_tick(void)
{
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
- unsigned long rt_jiffies;
struct tick_sched *ts;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
@@ -243,10 +242,6 @@ void tick_nohz_stop_sched_tick(void)
next_jiffies = get_next_timer_interrupt(last_jiffies);
delta_jiffies = next_jiffies - last_jiffies;
- rt_jiffies = rt_needs_cpu(cpu);
- if (rt_jiffies && rt_jiffies < delta_jiffies)
- delta_jiffies = rt_jiffies;
-
if (rcu_needs_cpu(cpu))
delta_jiffies = 1;
/*
diff --git a/kernel/user.c b/kernel/user.c
index 7132022a040c..debce602bfdd 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -101,7 +101,7 @@ static int sched_create_user(struct user_struct *up)
{
int rc = 0;
- up->tg = sched_create_group();
+ up->tg = sched_create_group(&root_task_group);
if (IS_ERR(up->tg))
rc = -ENOMEM;
@@ -193,6 +193,33 @@ static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
static struct kobj_attribute cpu_rt_runtime_attr =
__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
+
+static ssize_t cpu_rt_period_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+ return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
+}
+
+static ssize_t cpu_rt_period_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+ unsigned long rt_period;
+ int rc;
+
+ sscanf(buf, "%lu", &rt_period);
+
+ rc = sched_group_set_rt_period(up->tg, rt_period);
+
+ return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_period_attr =
+ __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif
/* default attributes per uid directory */
@@ -202,6 +229,7 @@ static struct attribute *uids_attributes[] = {
#endif
#ifdef CONFIG_RT_GROUP_SCHED
&cpu_rt_runtime_attr.attr,
+ &cpu_rt_period_attr.attr,
#endif
NULL
};