author     Ingo Molnar <mingo@elte.hu>   2009-03-26 21:39:17 +0100
committer  Ingo Molnar <mingo@elte.hu>   2009-03-27 17:28:43 +0100
commit     6e15cf04860074ad032e88c306bea656bbdd0f22
tree       c346383bb7563e8d66b2f4a502f875b259c34870 /kernel/sched_rt.c
parent     be0ea69674ed95e1e98cb3687a241badc756d228
parent     60db56422043aaa455ac7f858ce23c273220f9d9
Merge branch 'core/percpu' into percpu-cpumask-x86-for-linus-2
Conflicts:
arch/parisc/kernel/irq.c
arch/x86/include/asm/fixmap_64.h
arch/x86/include/asm/setup.h
kernel/irq/handle.c
Semantic merge:
arch/x86/include/asm/fixmap.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c79dc7844012..299d012b4394 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1122,12 +1122,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
 	first = cpumask_first(mask);
@@ -1143,6 +1144,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1175,19 +1177,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
 
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
 
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
+
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
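The hunks above replace an on-stack cpumask_t in find_lowest_rq() with a dynamically allocated cpumask_var_t, keeping the stack footprint bounded on kernels built with large NR_CPUS (CONFIG_CPUMASK_OFFSTACK). The following is a minimal, self-contained sketch of that alloc/use/free pattern; the function example_pick_cpu() and its body are illustrative assumptions, not code from this patch.

/*
 * Minimal sketch of the cpumask_var_t pattern adopted above (an
 * illustrative example, not code from the patch).  With
 * CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer and
 * alloc_cpumask_var() performs a real allocation; otherwise it is an
 * ordinary on-stack mask and the alloc/free calls compile away.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>

static int example_pick_cpu(const struct cpumask *candidates)
{
	cpumask_var_t tmp;
	int cpu = -1;

	/* GFP_ATOMIC because this may run in a non-sleeping context */
	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return -1;

	cpumask_and(tmp, candidates, cpu_online_mask);
	if (!cpumask_empty(tmp))
		cpu = cpumask_first(tmp);

	free_cpumask_var(tmp);	/* pair with every successful alloc */
	return cpu;
}

As in the patch, any early return must free the mask first, which is why the diff adds free_cpumask_var() both on the best_cpu hit and on the fall-through path after the domain loop.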