[PATCH] sched: make idlest_group/cpu cpus_allowed-aware
Add cpus_allowed checks to find_idlest_group() and find_idlest_cpu(), so that
find_idlest_group() returns only groups containing at least one CPU the task is
allowed to run on, and find_idlest_cpu() returns only a CPU from the task's
cpus_allowed mask.
Signed-off-by: M.Baris Demiray <baris@labristeknoloji.com>
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
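---
As a sanity check of the logic the diff below adds, here is a minimal
userspace sketch of the rule being enforced. It is not kernel code: NCPUS,
load[] and idlest_allowed_cpu() are hypothetical stand-ins for the CPU count,
source_load() and find_idlest_cpu(), with plain bitmasks in place of
cpumask_t; on such masks cpus_intersects() and cpus_and() reduce to `&`.

#include <limits.h>
#include <stdio.h>

#define NCPUS 8

/* Hypothetical per-CPU loads, standing in for source_load(). */
static unsigned long load[NCPUS] = { 3, 1, 4, 1, 5, 9, 2, 6 };

/*
 * Pick the least-loaded CPU of 'group_mask' that is also in 'allowed'.
 * Mirrors the patched find_idlest_cpu(): intersect first (cpus_and),
 * bail out if the intersection is empty (!cpus_intersects), then scan
 * only the surviving CPUs.
 */
static int idlest_allowed_cpu(unsigned int group_mask, unsigned int allowed)
{
	unsigned int tmp = group_mask & allowed;
	unsigned long min_load = ULONG_MAX;
	int i, idlest = -1;

	if (!tmp)			/* no CPUs allowed: skip this group */
		return -1;

	for (i = 0; i < NCPUS; i++) {
		if (!(tmp & (1u << i)))
			continue;
		if (load[i] < min_load) {
			min_load = load[i];
			idlest = i;
		}
	}
	return idlest;
}

int main(void)
{
	/* Group spans CPUs 4-7; the task is pinned to CPUs 0, 1 and 5. */
	printf("idlest: %d\n", idlest_allowed_cpu(0xf0, 0x23));
	return 0;
}

This prints "idlest: 5": CPU 5 is the only CPU of group mask 0xf0 that
survives the intersection with allowed mask 0x23, so it is chosen regardless
of its load.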
diff --git a/kernel/sched.c b/kernel/sched.c
index ef748e6..bac23fb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -966,8 +966,11 @@
 		int local_group;
 		int i;
 
+		/* Skip over this group if it has no CPUs allowed */
+		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+			goto nextgroup;
+
 		local_group = cpu_isset(this_cpu, group->cpumask);
-		/* XXX: put a cpus allowed check */
 
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
@@ -992,6 +995,7 @@
 			min_load = avg_load;
 			idlest = group;
 		}
+nextgroup:
 		group = group->next;
 	} while (group != sd->groups);
 
@@ -1003,13 +1007,18 @@
 /*
  * find_idlest_cpu - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group, int this_cpu)
+static int find_idlest_cpu(struct sched_group *group,
+		struct task_struct *p, int this_cpu)
 {
+	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	/* Traverse only the allowed CPUs */
+	cpus_and(tmp, group->cpumask, p->cpus_allowed);
+
+	for_each_cpu_mask(i, tmp) {
 		load = source_load(i, 0);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1052,7 +1061,7 @@
 		if (!group)
 			goto nextlevel;
 
-		new_cpu = find_idlest_cpu(group, cpu);
+		new_cpu = find_idlest_cpu(group, t, cpu);
 		if (new_cpu == -1 || new_cpu == cpu)
 			goto nextlevel;
 
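For context, p->cpus_allowed is the mask that sched_setaffinity(2) sets from
userspace, and the last hunk's caller, sched_balance_self(), is the fork/exec
balancing path that must now honour it instead of handing back a group or CPU
the task may not run on. A small illustration using today's glibc interface
(sched_getcpu() is a later glibc addition, used here purely for demonstration):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	/* Restrict ourselves to CPUs 0 and 2: this is what ends up
	 * in the task's cpus_allowed mask. */
	CPU_ZERO(&set);
	CPU_SET(0, &set);
	CPU_SET(2, &set);
	if (sched_setaffinity(0, sizeof(set), &set) == -1) {
		perror("sched_setaffinity");
		return 1;
	}

	/* The child inherits the mask; with the checks above, the
	 * fork-time balance must place it on CPU 0 or 2 as well. */
	if (fork() == 0) {
		printf("child running on CPU %d\n", sched_getcpu());
		return 0;
	}
	wait(NULL);
	return 0;
}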