author     Otavio Salvador <otavio@ossystems.com.br>   2020-10-29 19:42:29 -0300
committer  GitHub <noreply@github.com>                 2020-10-29 19:42:29 -0300
commit     1a578877cf48eb96b5c8554873cdee756ae16c7e (patch)
tree       10978aa5e09161d68426acc33554450032ceda65 /kernel/sched
parent     859f1281231fd3c1ca687bb517748daa19eaf402 (diff)
parent     8081974b75314b3f52a7afee916c352319261a16 (diff)
Merge pull request #160 from zandrey/5.4-2.1.x-imx
Update 5.4-2.1.x-imx to v5.4.73 from stable
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c  |  2
-rw-r--r--  kernel/sched/fair.c  |  9
-rw-r--r--  kernel/sched/sched.h | 13
3 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 79ce22de4409..4511532b08b8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -36,7 +36,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
+#ifdef CONFIG_SCHED_DEBUG
/*
* Debugging: various feature bits
*
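(For reference: the block this guard protects in core.c builds the default sysctl_sched_features bitmask from features.h. After the hunk above it is compiled for any CONFIG_SCHED_DEBUG=y build, no longer only when CONFIG_JUMP_LABEL is also set. A sketch of that block, reconstructed from the surrounding v5.4 sources rather than shown in the hunk:)

/* kernel/sched/core.c -- context around the hunk above (reconstructed sketch, v5.4) */
#ifdef CONFIG_SCHED_DEBUG		/* was: defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
#endif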
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b02a83ff4068..dddaf61378f6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5936,7 +5936,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
/*
* Scan the local SMT mask for idle CPUs.
*/
-static int select_idle_smt(struct task_struct *p, int target)
+static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
int cpu, si_cpu = -1;
@@ -5944,7 +5944,8 @@ static int select_idle_smt(struct task_struct *p, int target)
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
+ !cpumask_test_cpu(cpu, sched_domain_span(sd)))
continue;
if (available_idle_cpu(cpu))
return cpu;
@@ -5962,7 +5963,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
return -1;
}
-static inline int select_idle_smt(struct task_struct *p, int target)
+static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
return -1;
}
@@ -6072,7 +6073,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
if ((unsigned)i < nr_cpumask_bits)
return i;
- i = select_idle_smt(p, target);
+ i = select_idle_smt(p, sd, target);
if ((unsigned)i < nr_cpumask_bits)
return i;
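(The fair.c hunks above make select_idle_smt() take the sched domain that select_idle_sibling() is already working with, and skip SMT siblings that fall outside that domain's span, e.g. CPUs isolated with isolcpus, which belong to no sched domain. Putting the hunks together with the surrounding v5.4 code, the helper now reads roughly as follows; lines not shown in the hunks are reconstructed and may differ slightly:)

/* kernel/sched/fair.c -- sketch of select_idle_smt() after this patch */
static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
	int cpu, si_cpu = -1;

	if (!static_branch_likely(&sched_smt_present))
		return -1;

	for_each_cpu(cpu, cpu_smt_mask(target)) {
		/* Skip CPUs the task may not run on or that sit outside sd's span. */
		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
			continue;
		if (available_idle_cpu(cpu))
			return cpu;
		if (si_cpu == -1 && sched_idle_cpu(cpu))
			si_cpu = cpu;
	}

	return si_cpu;
}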
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9f2a9e34a78d..3e7590813844 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1568,7 +1568,7 @@ enum {
#undef SCHED_FEAT
-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
+#ifdef CONFIG_SCHED_DEBUG
/*
* To support run-time toggling of sched features, all the translation units
@@ -1576,6 +1576,7 @@ enum {
*/
extern const_debug unsigned int sysctl_sched_features;
+#ifdef CONFIG_JUMP_LABEL
#define SCHED_FEAT(name, enabled) \
static __always_inline bool static_branch_##name(struct static_key *key) \
{ \
@@ -1588,7 +1589,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
+#else /* !CONFIG_JUMP_LABEL */
+
+#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+
+#endif /* CONFIG_JUMP_LABEL */
+
+#else /* !SCHED_DEBUG */
/*
* Each translation unit has its own copy of sysctl_sched_features to allow
@@ -1604,7 +1611,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
+#endif /* SCHED_DEBUG */
extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;
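
(The sched.h hunks split the former combined SCHED_DEBUG && JUMP_LABEL guard into two nested levels: the sysctl_sched_features declaration now depends only on CONFIG_SCHED_DEBUG, and sched_feat() picks either the static-key form or a plain bitmask test depending on CONFIG_JUMP_LABEL. Condensed from the hunks above, the resulting layout is:)

/* kernel/sched/sched.h -- resulting #ifdef layout (condensed sketch of the hunks above) */
#ifdef CONFIG_SCHED_DEBUG

extern const_debug unsigned int sysctl_sched_features;

#ifdef CONFIG_JUMP_LABEL
/* One static key per feature; sched_feat() compiles to a static branch. */
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !CONFIG_JUMP_LABEL */
/* No jump labels: test the run-time bitmask defined in core.c. */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* CONFIG_JUMP_LABEL */

#else /* !SCHED_DEBUG */

/* SCHED_DEBUG off: each translation unit keeps its own constant copy of the mask. */
#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#endif /* SCHED_DEBUG */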