-rw-r--r--  init/Kconfig        |  8
-rw-r--r--  kernel/printk.c     |  4
-rw-r--r--  kernel/sched.c      | 10
-rw-r--r--  kernel/sched_fair.c |  8
4 files changed, 21 insertions(+), 9 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index 0d0bbf218f1f..dcc96a8c8c69 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -775,6 +775,14 @@ config PREEMPT_NOTIFIERS
 choice
 	prompt "RCU implementation type:"
 	default CLASSIC_RCU
+	help
+	  This allows you to choose either the classic RCU implementation
+	  that is designed for best read-side performance on non-realtime
+	  systems, or the preemptible RCU implementation for best latency
+	  on realtime systems. Note that some kernel preemption modes
+	  will restrict your choice.
+
+	  Select the default if you are unsure.

 config CLASSIC_RCU
 	bool "Classic RCU"
diff --git a/kernel/printk.c b/kernel/printk.c
index 58bbec684119..29ae1e99cde0 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -455,10 +455,10 @@ static int __init ignore_loglevel_setup(char *str)
 	ignore_loglevel = 1;
 	printk(KERN_INFO "debug: ignoring loglevel setting.\n");

-	return 1;
+	return 0;
 }

-__setup("ignore_loglevel", ignore_loglevel_setup);
+early_param("ignore_loglevel", ignore_loglevel_setup);

 /*
  * Write out chars from start to end - 1 inclusive
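Two things change together in this hunk: the handler is registered with early_param() instead of __setup(), so it runs earlier in boot (from parse_early_param()), and its return value flips because the two macros use opposite conventions: a __setup() handler returns 1 to mark the option as consumed, while an early_param() handler returns 0 on success. A minimal sketch of the early_param() convention, using a hypothetical "verbose_boot" flag:

#include <linux/init.h>

static int verbose_boot;

/* Hypothetical flag; the shape mirrors ignore_loglevel_setup() above. */
static int __init verbose_boot_setup(char *str)
{
	verbose_boot = 1;
	return 0;	/* 0 == success for early_param() handlers */
}
early_param("verbose_boot", verbose_boot_setup);
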
diff --git a/kernel/sched.c b/kernel/sched.c
index ba4c88088f62..8355e007e021 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);

 #define sched_class_highest (&rt_sched_class)

-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
 }

-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
 	rq->nr_running--;
 }
@@ -1354,7 +1354,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 		rq->nr_uninterruptible--;

 	enqueue_task(rq, p, wakeup);
-	inc_nr_running(p, rq);
+	inc_nr_running(rq);
 }

 /*
@@ -1366,7 +1366,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 		rq->nr_uninterruptible++;

 	dequeue_task(rq, p, sleep);
-	dec_nr_running(p, rq);
+	dec_nr_running(rq);
 }

 /**
@@ -2006,7 +2006,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq);
+		inc_nr_running(rq);
 	}
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 72e25c7a3a18..6c091d6e159d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -520,7 +520,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)

 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
+		if (sched_feat(NEW_FAIR_SLEEPERS))
 			vruntime -= sysctl_sched_latency;

 		/* ensure we never gain time by being placed backwards. */
@@ -1106,7 +1106,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	}

 	gran = sysctl_sched_wakeup_granularity;
-	if (unlikely(se->load.weight != NICE_0_LOAD))
+	/*
+	 * More easily preempt - nice tasks, while not making
+	 * it harder for + nice tasks.
+	 */
+	if (unlikely(se->load.weight > NICE_0_LOAD))
 		gran = calc_delta_fair(gran, &se->load);

 	if (pse->vruntime + gran < se->vruntime)
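The effect of the last hunk: calc_delta_fair() scales the wakeup granularity by NICE_0_LOAD / se->load.weight, where se is the currently running entity. With the old '!=' test a lighter-than-nice-0 (positive-nice) current task had its granularity inflated, i.e. an already low-priority task became harder to preempt; the '>' test keeps the scaling only for heavier (negative-nice) current tasks, which it makes easier to preempt. A small worked sketch of that arithmetic with illustrative numbers (NICE_0_LOAD is 1024; the 10 ms granularity and the nice -5/+5 weights 3121/335 are rough era defaults, not taken from this diff):

#include <stdio.h>

#define NICE_0_LOAD	1024UL

/* Models calc_delta_fair(): scale a delta by NICE_0_LOAD / weight. */
static unsigned long scale_gran(unsigned long gran_us, unsigned long weight)
{
	return gran_us * NICE_0_LOAD / weight;
}

int main(void)
{
	unsigned long gran = 10000;	/* 10 ms in microseconds, illustrative */

	/* Heavier (negative-nice) current task: granularity shrinks to ~3.3 ms,
	 * so the waking task preempts it sooner -- the case the '>' test keeps. */
	printf("current nice -5: %lu us\n", scale_gran(gran, 3121));

	/* Lighter (positive-nice) current task: the old '!=' test would have
	 * inflated this to ~30.6 ms; with '>' the 10 ms value is left alone. */
	printf("current nice +5: %lu us\n", scale_gran(gran, 335));

	return 0;
}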