Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	201
1 file changed, 121 insertions(+), 80 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b4fbbc440453..38933cafea8a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -52,7 +52,6 @@
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
-#include <linux/cpu_acct.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
@@ -75,7 +74,7 @@
*/
unsigned long long __attribute__((weak)) sched_clock(void)
{
- return (unsigned long long)jiffies * (1000000000 / HZ);
+ return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
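/*
 * [Editor's note, not part of the patch] NSEC_PER_SEC / HZ is the same
 * constant as the old 1000000000 / HZ; e.g. with HZ = 250 the fallback
 * clock still advances by 4000000 ns (4 ms) per jiffy. The change is a
 * readability cleanup only.
 */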
/*
@@ -99,8 +98,8 @@ unsigned long long __attribute__((weak)) sched_clock(void)
/*
* Some helpers for converting nanosecond timing to jiffy resolution
*/
-#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
+#define JIFFIES_TO_NS(TIME) ((TIME) * (NSEC_PER_SEC / HZ))
#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT
@@ -172,6 +171,7 @@ struct task_group {
unsigned long shares;
/* spinlock to serialize modification to shares */
spinlock_t lock;
+ struct rcu_head rcu;
};
/* Default task group's sched entity on each cpu */
@@ -216,15 +216,15 @@ static inline struct task_group *task_group(struct task_struct *p)
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
{
- p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
- p->se.parent = task_group(p)->se[task_cpu(p)];
+ p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+ p->se.parent = task_group(p)->se[cpu];
}
#else
-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -258,7 +258,6 @@ struct cfs_rq {
*/
struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
struct task_group *tg; /* group that "owns" this runqueue */
- struct rcu_head rcu;
#endif
};
@@ -456,24 +455,28 @@ static void update_rq_clock(struct rq *rq)
*/
enum {
SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
- SCHED_FEAT_START_DEBIT = 2,
- SCHED_FEAT_TREE_AVG = 4,
- SCHED_FEAT_APPROX_AVG = 8,
- SCHED_FEAT_WAKEUP_PREEMPT = 16,
- SCHED_FEAT_PREEMPT_RESTRICT = 32,
+ SCHED_FEAT_WAKEUP_PREEMPT = 2,
+ SCHED_FEAT_START_DEBIT = 4,
+ SCHED_FEAT_TREE_AVG = 8,
+ SCHED_FEAT_APPROX_AVG = 16,
};
const_debug unsigned int sysctl_sched_features =
SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
+ SCHED_FEAT_WAKEUP_PREEMPT * 1 |
SCHED_FEAT_START_DEBIT * 1 |
SCHED_FEAT_TREE_AVG * 0 |
- SCHED_FEAT_APPROX_AVG * 0 |
- SCHED_FEAT_WAKEUP_PREEMPT * 1 |
- SCHED_FEAT_PREEMPT_RESTRICT * 1;
+ SCHED_FEAT_APPROX_AVG * 0;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
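/*
 * [Editor's sketch, not part of the patch] With the renumbered flags the
 * default mask above evaluates to NEW_FAIR_SLEEPERS | WAKEUP_PREEMPT |
 * START_DEBIT = 1 + 2 + 4 = 7, and a feature test compiles down to a
 * single mask check, e.g.:
 *
 *	if (sched_feat(WAKEUP_PREEMPT)) {
 *		... consider preempting the current task on wakeup ...
 *	}
 */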
/*
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
+const_debug unsigned int sysctl_sched_nr_migrate = 32;
+
+/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
* clock constructed from sched_clock():
*/
@@ -1019,10 +1022,16 @@ unsigned long weighted_cpuload(const int cpu)
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
+ set_task_cfs_rq(p, cpu);
#ifdef CONFIG_SMP
+ /*
+ * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
+ * successfully executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
task_thread_info(p)->cpu = cpu;
#endif
- set_task_cfs_rq(p);
}
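/*
 * [Editor's sketch, not part of the patch] The smp_wmb() above pairs with
 * the cpu lookup that task_rq_lock() performs on another CPU, roughly:
 *
 *	for (;;) {
 *		rq = task_rq(p);		// reads task_thread_info(p)->cpu
 *		spin_lock(&rq->lock);
 *		if (likely(rq == task_rq(p)))
 *			break;			// p's per-task updates are visible here
 *		spin_unlock(&rq->lock);
 *	}
 */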
#ifdef CONFIG_SMP
@@ -2237,7 +2246,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
enum cpu_idle_type idle, int *all_pinned,
int *this_best_prio, struct rq_iterator *iterator)
{
- int pulled = 0, pinned = 0, skip_for_load;
+ int loops = 0, pulled = 0, pinned = 0, skip_for_load;
struct task_struct *p;
long rem_load_move = max_load_move;
@@ -2251,10 +2260,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
*/
p = iterator->start(iterator->arg);
next:
- if (!p)
+ if (!p || loops++ > sysctl_sched_nr_migrate)
goto out;
/*
- * To help distribute high priority tasks accross CPUs we don't
+ * To help distribute high priority tasks across CPUs we don't
* skip a task if it will be the highest priority task (i.e. smallest
* prio value) on its new queue regardless of its load weight
*/
@@ -2271,8 +2280,7 @@ next:
rem_load_move -= p->se.load.weight;
/*
- * We only want to steal up to the prescribed number of tasks
- * and the prescribed amount of weighted load.
+ * We only want to steal up to the prescribed amount of weighted load.
*/
if (rem_load_move > 0) {
if (p->prio < *this_best_prio)
@@ -3335,13 +3343,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
- struct rq *rq = this_rq();
p->utime = cputime_add(p->utime, cputime);
- if (p != rq->idle)
- cpuacct_charge(p, cputime);
-
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (TASK_NICE(p) > 0)
@@ -3355,7 +3359,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update
*/
-void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
{
cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
@@ -3392,10 +3396,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
struct rq *rq = this_rq();
cputime64_t tmp;
- if (p->flags & PF_VCPU) {
- account_guest_time(p, cputime);
- return;
- }
+ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+ return account_guest_time(p, cputime);
p->stime = cputime_add(p->stime, cputime);
@@ -3405,10 +3407,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
cpustat->irq = cputime64_add(cpustat->irq, tmp);
else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
- else if (p != rq->idle) {
+ else if (p != rq->idle)
cpustat->system = cputime64_add(cpustat->system, tmp);
- cpuacct_charge(p, cputime);
- } else if (atomic_read(&rq->nr_iowait) > 0)
+ else if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
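/*
 * [Editor's note, not part of the patch] On the regular tick path
 * account_system_time() is invoked with hardirq_offset = HARDIRQ_OFFSET,
 * so the new (irq_count() - hardirq_offset == 0) check charges guest time
 * only when the tick interrupted the vcpu task itself, not a hard or soft
 * IRQ that happened to run on top of it.
 */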
@@ -3444,10 +3445,8 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
- } else {
+ } else
cpustat->steal = cputime64_add(cpustat->steal, tmp);
- cpuacct_charge(p, -tmp);
- }
}
/*
@@ -4992,6 +4991,32 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static inline void sched_init_granularity(void)
+{
+ unsigned int factor = 1 + ilog2(num_online_cpus());
+ const unsigned long limit = 200000000;
+
+ sysctl_sched_min_granularity *= factor;
+ if (sysctl_sched_min_granularity > limit)
+ sysctl_sched_min_granularity = limit;
+
+ sysctl_sched_latency *= factor;
+ if (sysctl_sched_latency > limit)
+ sysctl_sched_latency = limit;
+
+ sysctl_sched_wakeup_granularity *= factor;
+ sysctl_sched_batch_wakeup_granularity *= factor;
+}
+
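/*
 * [Editor's note, not part of the patch] Worked example: on an 8-CPU
 * system factor = 1 + ilog2(8) = 4, so each tunable is scaled 4x;
 * sysctl_sched_min_granularity and sysctl_sched_latency are additionally
 * clamped to the 200000000 ns (200 ms) limit.
 */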
#ifdef CONFIG_SMP
/*
* This is how migration works:
@@ -5257,23 +5282,9 @@ static void migrate_live_tasks(int src_cpu)
}
/*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
- update_rq_clock(rq);
-
- if (p->state == TASK_UNINTERRUPTIBLE)
- rq->nr_uninterruptible--;
-
- enqueue_task(rq, p, 0);
- inc_nr_running(p, rq);
-}
-
-/*
* Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of the runqueue. Used by CPU offline code.
+ * It does so by boosting its priority to highest possible.
+ * Used by CPU offline code.
*/
void sched_idle_next(void)
{
@@ -5293,8 +5304,8 @@ void sched_idle_next(void)
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
- /* Add idle task to the _front_ of its priority queue: */
- activate_idle_task(p, rq);
+ update_rq_clock(rq);
+ activate_task(rq, p, 0);
spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -5365,7 +5376,7 @@ static struct ctl_table sd_ctl_dir[] = {
.procname = "sched_domain",
.mode = 0555,
},
- {0,},
+ {0, },
};
static struct ctl_table sd_ctl_root[] = {
@@ -5375,7 +5386,7 @@ static struct ctl_table sd_ctl_root[] = {
.mode = 0555,
.child = sd_ctl_dir,
},
- {0,},
+ {0, },
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -5621,7 +5632,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
.priority = 10
};
-int __init migration_init(void)
+void __init migration_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int err;
@@ -5631,8 +5642,6 @@ int __init migration_init(void)
BUG_ON(err == NOTIFY_BAD);
migration_call(&migration_notifier, CPU_ONLINE, cpu);
register_cpu_notifier(&migration_notifier);
-
- return 0;
}
#endif
@@ -6688,10 +6697,12 @@ void __init sched_init_smp(void)
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
+ sched_init_granularity();
}
#else
void __init sched_init_smp(void)
{
+ sched_init_granularity();
}
#endif /* CONFIG_SMP */
@@ -7019,8 +7030,8 @@ err:
/* rcu callback to free various structures associated with a task group */
static void free_sched_group(struct rcu_head *rhp)
{
- struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
- struct task_group *tg = cfs_rq->tg;
+ struct task_group *tg = container_of(rhp, struct task_group, rcu);
+ struct cfs_rq *cfs_rq;
struct sched_entity *se;
int i;
@@ -7041,7 +7052,7 @@ static void free_sched_group(struct rcu_head *rhp)
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
- struct cfs_rq *cfs_rq;
+ struct cfs_rq *cfs_rq = NULL;
int i;
for_each_possible_cpu(i) {
@@ -7049,10 +7060,10 @@ void sched_destroy_group(struct task_group *tg)
list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
}
- cfs_rq = tg->cfs_rq[0];
+ BUG_ON(!cfs_rq);
/* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&cfs_rq->rcu, free_sched_group);
+ call_rcu(&tg->rcu, free_sched_group);
}
/* change task's runqueue when it moves between groups.
@@ -7068,8 +7079,10 @@ void sched_move_task(struct task_struct *tsk)
rq = task_rq_lock(tsk, &flags);
- if (tsk->sched_class != &fair_sched_class)
+ if (tsk->sched_class != &fair_sched_class) {
+ set_task_cfs_rq(tsk, task_cpu(tsk));
goto done;
+ }
update_rq_clock(rq);
@@ -7082,7 +7095,7 @@ void sched_move_task(struct task_struct *tsk)
tsk->sched_class->put_prev_task(rq, tsk);
}
- set_task_cfs_rq(tsk);
+ set_task_cfs_rq(tsk, task_cpu(tsk));
if (on_rq) {
if (unlikely(running))
@@ -7211,25 +7224,53 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
return (u64) tg->shares;
}
-static struct cftype cpu_shares = {
- .name = "shares",
- .read_uint = cpu_shares_read_uint,
- .write_uint = cpu_shares_write_uint,
+static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ struct task_group *tg = cgroup_tg(cgrp);
+ unsigned long flags;
+ u64 res = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ /*
+ * Lock to prevent races with updating 64-bit counters
+ * on 32-bit arches.
+ */
+ spin_lock_irqsave(&cpu_rq(i)->lock, flags);
+ res += tg->se[i]->sum_exec_runtime;
+ spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
+ }
+ /* Convert from ns to ms */
+ do_div(res, NSEC_PER_MSEC);
+
+ return res;
+}
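/*
 * [Editor's sketch, assuming the cgroup filesystem is mounted with the
 * cpu subsystem at /cgroups] The new per-group file is read as:
 *
 *	# cat /cgroups/<group>/cpu.usage
 *
 * and reports the group's cumulative CPU time in milliseconds: the sum of
 * tg->se[i]->sum_exec_runtime over all possible CPUs, converted from ns
 * with do_div(res, NSEC_PER_MSEC).
 */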
+
+static struct cftype cpu_files[] = {
+ {
+ .name = "shares",
+ .read_uint = cpu_shares_read_uint,
+ .write_uint = cpu_shares_write_uint,
+ },
+ {
+ .name = "usage",
+ .read_uint = cpu_usage_read,
+ },
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
- return cgroup_add_file(cont, ss, &cpu_shares);
+ return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}
struct cgroup_subsys cpu_cgroup_subsys = {
- .name = "cpu",
- .create = cpu_cgroup_create,
- .destroy = cpu_cgroup_destroy,
- .can_attach = cpu_cgroup_can_attach,
- .attach = cpu_cgroup_attach,
- .populate = cpu_cgroup_populate,
- .subsys_id = cpu_cgroup_subsys_id,
+ .name = "cpu",
+ .create = cpu_cgroup_create,
+ .destroy = cpu_cgroup_destroy,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
+ .populate = cpu_cgroup_populate,
+ .subsys_id = cpu_cgroup_subsys_id,
.early_init = 1,
};