cpumask: use topology_core_cpumask/topology_thread_cpumask instead of cpu_core_map/cpu_sibling_map

Impact: cleanup

This is presumably what those definitions are for, and while all archs
define cpu_core_map/cpu_sibling_map, that's changing (e.g. x86 wants to
change it to a pointer).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Author: Rusty Russell <rusty@rustcorp.com.au>
Date:   2009-03-13 14:49:46 +10:30
Commit: c69fc56de1
Parent: d95c357812

2 changed files with 5 additions and 5 deletions
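The change is mechanical: every direct reference to the per-cpu
cpu_sibling_map object becomes a call through the topology accessor,
which returns a const struct cpumask * no matter how the arch stores
the underlying map. A minimal sketch of the idiom, assuming a kernel
where <linux/topology.h> provides the accessor (the helper below is
purely illustrative, not part of the commit):

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Illustrative only: return the first SMT sibling of @cpu. */
	static inline int first_thread_sibling(int cpu)
	{
		/*
		 * Old idiom -- assumes a per-cpu cpumask_t object:
		 *   return first_cpu(per_cpu(cpu_sibling_map, cpu));
		 * New idiom -- keeps working even if the arch switches
		 * the underlying storage to a pointer:
		 */
		return cpumask_first(topology_thread_cpumask(cpu));
	}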

--- a/block/blk.h
+++ b/block/blk.h

@@ -102,7 +102,7 @@ static inline int blk_cpu_to_group(int cpu)
 	const struct cpumask *mask = cpu_coregroup_mask(cpu);
 	return cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	return first_cpu(per_cpu(cpu_sibling_map, cpu));
+	return cpumask_first(topology_thread_cpumask(cpu));
 #else
 	return cpu;
 #endif

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -7249,7 +7249,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7278,7 +7278,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #else
 	group = cpu;
@@ -7621,7 +7621,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7632,7 +7632,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
 		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		if (i != cpumask_first(this_sibling_map))
 			continue;
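All four sched.c hunks follow the same pattern: intersect the topology
mask with cpu_map so that CPUs outside the set being built drop out of
the resulting span. A hypothetical helper capturing that pattern (not
part of the commit; names are illustrative):

	/* span = thread siblings of @cpu, restricted to @cpu_map */
	static void sibling_span(int cpu, const struct cpumask *cpu_map,
				 struct cpumask *span)
	{
		cpumask_and(span, topology_thread_cpumask(cpu), cpu_map);
	}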