sched: core: Fix usage of cpu core group mask

To find the number of clusters, we depend on topology_core_cpumask(),
but that returns only the currently online sibling CPUs; if a cluster
has offlined CPUs, the cluster count can come out wrong.

If the system boots with fewer CPUs and the rest come online after
bootup, the sched up/down migrate setup breaks.

Fix this by using the core's possible sibling mask.

While at it, refactor the CPU possible sibling mask API to avoid
compilation errors on other architectures.
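
For illustration, a minimal sketch of the cluster-counting walk
(mirroring the loop in find_capacity_margin_levels() below, simplified;
count_clusters() is a hypothetical helper, not the exact kernel code):

    /*
     * Sketch only: count clusters by stepping over each CPU's sibling
     * mask.  With topology_core_cpumask() an offlined sibling is missing
     * from the mask, so cpumask_weight() undercounts and the walk lands
     * mid-cluster, inflating the count.  The possible sibling mask always
     * covers the whole cluster, so the walk is stable across hotplug.
     */
    static int count_clusters(void)
    {
    	int cpu = 0, clusters = 0;

    	while (cpu < num_possible_cpus()) {
    		cpu += cpumask_weight(topology_possible_sibling_cpumask(cpu));
    		clusters++;
    	}
    	return clusters;
    }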

Change-Id: I4543ad5b711181e60271fa887bb7d5855a55ba90
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Author: Lingutla Chandrasekhar
Date: 2019-07-19 17:31:20 +05:30
parent adee9374b3
commit 72395648aa
6 changed files with 10 additions and 14 deletions

@@ -20,14 +20,11 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_core_id(cpu) (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
 #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_possible_sibling_cpumask topology_core_cpumask
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
-static inline const struct cpumask *cpu_possible_coregroup_mask(int cpu)
-{
-	return cpu_coregroup_mask(cpu);
-}
 #include <linux/arch_topology.h>

@@ -22,12 +22,13 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
 #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
 #define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
+#define topology_possible_sibling_cpumask(cpu) \
+	(&cpu_topology[cpu].core_possible_sibling)
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 void remove_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
-const struct cpumask *cpu_possible_coregroup_mask(int cpu);
 #ifdef CONFIG_NUMA

@@ -213,11 +213,6 @@ static int __init parse_dt_topology(void)
 struct cpu_topology cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
-const struct cpumask *cpu_possible_coregroup_mask(int cpu)
-{
-	return &cpu_topology[cpu].core_possible_sibling;
-}
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

@@ -193,6 +193,9 @@ static inline int cpu_to_mem(int cpu)
 #ifndef topology_core_cpumask
 #define topology_core_cpumask(cpu) cpumask_of(cpu)
 #endif
+#ifndef topology_possible_sibling_cpumask
+#define topology_possible_sibling_cpumask(cpu) cpumask_of(cpu)
+#endif
 #ifdef CONFIG_SCHED_SMT
 static inline const struct cpumask *cpu_smt_mask(int cpu)

@@ -6824,7 +6824,7 @@ static int find_capacity_margin_levels(void)
 	int cpu, max_clusters;
 	for (cpu = max_clusters = 0; cpu < num_possible_cpus();) {
-		cpu += cpumask_weight(topology_core_cpumask(cpu));
+		cpu += cpumask_weight(topology_possible_sibling_cpumask(cpu));
 		max_clusters++;
 	}
@@ -6886,8 +6886,8 @@ static void sched_update_updown_migrate_values(unsigned int *data,
 	for (i = cpu = 0; (!cluster_cpus[i]) &&
 				cpu < num_possible_cpus(); i++) {
-		cluster_cpus[i] = topology_core_cpumask(cpu);
-		cpu += cpumask_weight(topology_core_cpumask(cpu));
+		cluster_cpus[i] = topology_possible_sibling_cpumask(cpu);
+		cpu += cpumask_weight(topology_possible_sibling_cpumask(cpu));
 	}
 	if (data == &sysctl_sched_capacity_margin_up[0])

@@ -2360,7 +2360,7 @@ void update_cluster_topology(void)
 	INIT_LIST_HEAD(&new_head);
 	for_each_cpu(i, &cpus) {
-		cluster_cpus = cpu_possible_coregroup_mask(i);
+		cluster_cpus = topology_possible_sibling_cpumask(i);
 		cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
 		cpumask_andnot(&cpus, &cpus, cluster_cpus);
 		add_cluster(cluster_cpus, &new_head);