Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c      | 65
-rw-r--r--  kernel/sched/features.h  |  2
-rw-r--r--  kernel/sched/rt.c        | 40
-rw-r--r--  kernel/sched/sched.h     | 35
4 files changed, 96 insertions(+), 46 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8b335016c52..62cb6b24ab46 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -73,6 +73,7 @@
#include <linux/init_task.h>
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
+#include <linux/tegra_profiler.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -1849,6 +1850,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
trace_sched_switch(prev, next);
sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next);
+ quadd_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
@@ -1892,6 +1894,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
vtime_task_switch(prev);
finish_arch_switch(prev);
perf_event_task_sched_in(prev, current);
+ quadd_task_sched_in(prev, current);
finish_lock_switch(rq, prev);
finish_arch_post_lock_switch();
@@ -2075,6 +2078,33 @@ unsigned long this_cpu_load(void)
return this->cpu_load[0];
}
+u64 nr_running_integral(unsigned int cpu)
+{
+ unsigned int seqcnt;
+ u64 integral;
+ struct rq *q;
+
+ if (cpu >= nr_cpu_ids)
+ return 0;
+
+ q = cpu_rq(cpu);
+
+ /*
+ * Update the average to avoid reading a stale value if there were
+ * no run-queue changes for a long time. On the other hand, if
+ * changes are happening right now, just read the current value
+ * directly.
+ */
+
+ seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+ integral = do_nr_running_integral(q);
+ if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+ read_seqcount_begin(&q->ave_seqcnt);
+ integral = q->nr_running_integral;
+ }
+
+ return integral;
+}
/*
* Global load-average calculations
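nr_running_integral() exposes a monotonically growing, fixed-point integral of nr_running over task-clock time; a consumer samples it twice and divides the difference by the elapsed nanoseconds to get the mean run-queue depth over that window. On a torn read, the second read_seqcount_begin() spins until the in-flight writer finishes, and the freshly committed rq->nr_running_integral serves as the fallback. A minimal userspace model of the consumer-side arithmetic, assuming FSHIFT = 11 as in the kernel's loadavg fixed-point code (the task counts, times, and the integral_update() helper are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define FSHIFT 11
#define NR_AVE_SCALE(x) ((uint64_t)(x) << FSHIFT)

/* same accumulation rule as do_nr_running_integral() */
static uint64_t integral_update(uint64_t integral, unsigned int nr_running,
                                uint64_t delta_ns)
{
        return integral + NR_AVE_SCALE(nr_running) * delta_ns;
}

int main(void)
{
        uint64_t integral = 0;
        uint64_t window_ns = 8000000;

        /* 3 tasks runnable for 2 ms, then 1 task for 6 ms */
        integral = integral_update(integral, 3, 2000000);
        integral = integral_update(integral, 1, 6000000);

        /* mean nr_running over the 8 ms window, descaled from fixed point */
        double avg = (double)integral / window_ns / (1 << FSHIFT);
        printf("average nr_running = %.2f\n", avg);   /* (3*2 + 1*6)/8 = 1.50 */
        return 0;
}

Descaling by 1 << FSHIFT happens once, at the end, so the intermediate sums keep the extra fixed-point precision.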
@@ -5206,6 +5236,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_UP_PREPARE:
rq->calc_load_update = calc_load_update;
+ rq->next_balance = jiffies;
break;
case CPU_ONLINE:
@@ -6867,9 +6898,6 @@ void __init sched_init_smp(void)
hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
- /* RT runtime code needs to handle some hotplug events */
- hotcpu_notifier(update_runtime, 0);
-
init_hrtick();
/* Move init over to a non-isolated CPU */
@@ -7093,13 +7121,24 @@ static inline int preempt_count_equals(int preempt_offset)
return (nested == preempt_offset);
}
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+ __might_sleep_init_called = 1;
+ return 0;
+}
+early_initcall(__might_sleep_init);
+
void __might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- system_state != SYSTEM_RUNNING || oops_in_progress)
+ oops_in_progress)
+ return;
+ if (system_state != SYSTEM_RUNNING &&
+ (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
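The net effect of the __might_sleep() change: previously any system_state other than SYSTEM_RUNNING silenced the warning, so sleeping-in-atomic bugs during most of boot went unreported; now only the window before early initcalls run (while __might_sleep_init_called is still zero) stays silent. A standalone model of the new suppression predicate, with an abbreviated stand-in for the kernel's system_states enum:

#include <stdbool.h>
#include <stdio.h>

enum system_states { SYSTEM_BOOTING, SYSTEM_RUNNING, SYSTEM_HALT };

/* true when the warning is suppressed, mirroring the second early
 * return in __might_sleep() above */
static bool suppressed(enum system_states state, bool init_called)
{
        return state != SYSTEM_RUNNING &&
               (!init_called || state != SYSTEM_BOOTING);
}

int main(void)
{
        printf("%d\n", suppressed(SYSTEM_BOOTING, false)); /* 1: early boot, silent */
        printf("%d\n", suppressed(SYSTEM_BOOTING, true));  /* 0: late boot now warns */
        printf("%d\n", suppressed(SYSTEM_HALT, true));     /* 1: shutdown stays silent */
        printf("%d\n", suppressed(SYSTEM_RUNNING, true));  /* 0: normal operation warns */
        return 0;
}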
@@ -7705,6 +7744,23 @@ static void cpu_cgroup_css_offline(struct cgroup *cgrp)
sched_offline_group(tg);
}
+static int
+cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+ const struct cred *cred = current_cred(), *tcred;
+ struct task_struct *task;
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ tcred = __task_cred(task);
+
+ if ((current != task) && !capable(CAP_SYS_NICE) &&
+ cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EACCES;
+ }
+
+ return 0;
+}
+
static int cpu_cgroup_can_attach(struct cgroup *cgrp,
struct cgroup_taskset *tset)
{
@@ -8064,6 +8120,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
.css_offline = cpu_cgroup_css_offline,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
+ .allow_attach = cpu_cgroup_allow_attach,
.exit = cpu_cgroup_exit,
.subsys_id = cpu_cgroup_subsys_id,
.base_cftypes = cpu_files,
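The allow_attach hook loosens the usual cgroup attach permission check for the cpu controller: a writer may move a task if it is the task itself, holds CAP_SYS_NICE, or its effective UID matches the target task's real or saved UID; anything else fails with -EACCES. A sketch of that policy as a standalone predicate (the struct and helper names here are invented for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

struct cred_model { uid_t euid, uid, suid; };

/* mirrors the check in cpu_cgroup_allow_attach(): allowed if it is
 * the caller's own task, the caller has CAP_SYS_NICE, or the caller's
 * euid matches the target's real or saved uid */
static bool may_attach(bool same_task, bool cap_sys_nice,
                       const struct cred_model *cur,
                       const struct cred_model *target)
{
        if (same_task || cap_sys_nice)
                return true;
        return cur->euid == target->uid || cur->euid == target->suid;
}

int main(void)
{
        struct cred_model writer = { .euid = 1000 };
        struct cred_model own    = { .uid = 1000, .suid = 1000 };
        struct cred_model other  = { .uid = 1001, .suid = 1001 };

        printf("%d\n", may_attach(false, false, &writer, &own));   /* 1 */
        printf("%d\n", may_attach(false, false, &writer, &other)); /* 0 -> -EACCES */
        printf("%d\n", may_attach(false, true,  &writer, &other)); /* 1 via CAP_SYS_NICE */
        return 0;
}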
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 99399f8e4799..0945d3837e6e 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -57,7 +57,7 @@ SCHED_FEAT(NONTASK_POWER, true)
SCHED_FEAT(TTWU_QUEUE, true)
SCHED_FEAT(FORCE_SD_OVERLAP, false)
-SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(RT_RUNTIME_SHARE, false)
SCHED_FEAT(LB_MIN, false)
/*
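With the default flipped to false, an rt_rq that exhausts its local rt_runtime is throttled rather than borrowing budget from other CPUs' rt_rqs, which keeps per-CPU RT bandwidth predictable at the cost of some throughput. The default can still be overridden at runtime when CONFIG_SCHED_DEBUG is enabled, via the sched_features debugfs file; a sketch, assuming debugfs is mounted at /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
        /* writing the feature name enables it; a "NO_" prefix disables it */
        FILE *f = fopen("/sys/kernel/debug/sched_features", "w");
        if (!f) {
                perror("sched_features");
                return 1;
        }
        fputs("RT_RUNTIME_SHARE", f);
        fclose(f);
        return 0;
}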
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 127a2c4cf4ab..111539a27757 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -699,15 +699,6 @@ balanced:
}
}
-static void disable_runtime(struct rq *rq)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- __disable_runtime(rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
static void __enable_runtime(struct rq *rq)
{
rt_rq_iter_t iter;
@@ -732,37 +723,6 @@ static void __enable_runtime(struct rq *rq)
}
}
-static void enable_runtime(struct rq *rq)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&rq->lock, flags);
- __enable_runtime(rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
- int cpu = (int)(long)hcpu;
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- disable_runtime(cpu_rq(cpu));
- return NOTIFY_OK;
-
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- enable_runtime(cpu_rq(cpu));
- return NOTIFY_OK;
-
- default:
- return NOTIFY_DONE;
- }
-}
-
static int balance_runtime(struct rt_rq *rt_rq)
{
int more = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce39224d6155..35bd8b7f3a87 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -415,6 +415,11 @@ struct rq {
#endif
int skip_clock_update;
+ /* time-based average load */
+ u64 nr_last_stamp;
+ u64 nr_running_integral;
+ seqcount_t ave_seqcnt;
+
/* capture load from *all* tasks on this cpu: */
struct load_weight load;
unsigned long nr_load_updates;
@@ -1051,7 +1056,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
-extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
@@ -1073,9 +1077,34 @@ static inline u64 steal_ticks(u64 steal)
}
#endif
+/* 27 ~= 134217728ns = 134.2ms
+ * 26 ~= 67108864ns = 67.1ms
+ * 25 ~= 33554432ns = 33.5ms
+ * 24 ~= 16777216ns = 16.8ms
+ */
+#define NR_AVE_SCALE(x) ((x) << FSHIFT)
+
+
+static inline u64 do_nr_running_integral(struct rq *rq)
+{
+ s64 nr, deltax;
+ u64 nr_running_integral = rq->nr_running_integral;
+
+ deltax = rq->clock_task - rq->nr_last_stamp;
+ nr = NR_AVE_SCALE(rq->nr_running);
+
+ nr_running_integral += nr * deltax;
+
+ return nr_running_integral;
+}
+
static inline void inc_nr_running(struct rq *rq)
{
+ write_seqcount_begin(&rq->ave_seqcnt);
+ rq->nr_running_integral = do_nr_running_integral(rq);
+ rq->nr_last_stamp = rq->clock_task;
rq->nr_running++;
+ write_seqcount_end(&rq->ave_seqcnt);
#ifdef CONFIG_NO_HZ_FULL
if (rq->nr_running == 2) {
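The exponent table above NR_AVE_SCALE() reads like a menu of power-of-two averaging periods, 2^exp nanoseconds each, presumably chosen so that a divide by the period reduces to a shift; only the FSHIFT-based scaling macro is actually used in this revision. A quick sanity check of the table's arithmetic:

#include <stdio.h>

int main(void)
{
        /* 2^exp nanoseconds for the exponents listed in the comment;
         * note the comment's 33.5ms truncates 33.55 rather than rounding */
        for (int exp = 24; exp <= 27; exp++)
                printf("2^%d ns = %lu ns = %.2f ms\n",
                       exp, 1UL << exp, (1UL << exp) / 1e6);
        return 0;
}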
@@ -1090,7 +1119,11 @@ static inline void inc_nr_running(struct rq *rq)
static inline void dec_nr_running(struct rq *rq)
{
+ write_seqcount_begin(&rq->ave_seqcnt);
+ rq->nr_running_integral = do_nr_running_integral(rq);
+ rq->nr_last_stamp = rq->clock_task;
rq->nr_running--;
+ write_seqcount_end(&rq->ave_seqcnt);
}
static inline void rq_last_tick_reset(struct rq *rq)
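inc_nr_running() and dec_nr_running() are the only writers: each folds the interval since nr_last_stamp into the integral at the old nr_running before changing the count, so every stretch of time is charged at the run-queue depth that actually held during it, and the whole update is bracketed by the seqcount so readers can detect it. A single-threaded model of that producer-side bookkeeping (names and numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

#define FSHIFT 11

struct rq_model {
        uint64_t clock;                 /* ns, stands in for rq->clock_task */
        uint64_t nr_last_stamp;
        uint64_t nr_running_integral;
        unsigned int nr_running;
};

/* charge [nr_last_stamp, clock] at the current depth, as in
 * do_nr_running_integral() */
static void fold(struct rq_model *rq)
{
        rq->nr_running_integral +=
                ((uint64_t)rq->nr_running << FSHIFT) *
                (rq->clock - rq->nr_last_stamp);
        rq->nr_last_stamp = rq->clock;
}

static void inc_nr(struct rq_model *rq) { fold(rq); rq->nr_running++; }
static void dec_nr(struct rq_model *rq) { fold(rq); rq->nr_running--; }

int main(void)
{
        struct rq_model rq = { .nr_running = 1 };

        rq.clock = 4000000; inc_nr(&rq);  /* 1 task for 4 ms, then 2 */
        rq.clock = 6000000; dec_nr(&rq);  /* 2 tasks for 2 ms, then 1 */
        rq.clock = 8000000; fold(&rq);    /* 1 task for the final 2 ms */

        /* (1*4 + 2*2 + 1*2) / 8 = 1.25 */
        printf("avg = %.2f\n",
               (double)rq.nr_running_integral / rq.clock / (1 << FSHIFT));
        return 0;
}

Folding before the increment is the load-bearing detail: charging the interval at the new count would retroactively misattribute the time that has already elapsed.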