c88d591089
The problem with wake_idle() is that it doesn't respect things like
cpu_power, which means it doesn't deal well with SMT nor the recent RT
interaction.

To cure this, it needs to do what sched_balance_self() does, which leads
to the possibility of merging select_task_rq_fair() and
sched_balance_self().

Modify sched_balance_self() to:

  - update_shares() when walking up the domain tree (it only called it
    for the top domain, but it should have done this anyway), which
    allows us to remove this ugly bit from try_to_wake_up().

  - do wake_affine() on the smallest domain that contains both this
    (the waking) and the prev (the wakee) cpu for WAKE invocations.

Then use the top-down balance steps it had to replace wake_idle().

This leads to the disappearance of SD_WAKE_BALANCE and SD_WAKE_IDLE_FAR,
with SD_WAKE_IDLE replaced by SD_BALANCE_WAKE.

SD_WAKE_AFFINE needs SD_BALANCE_WAKE to be effective.

Touch all topology bits to replace the old with the new SD flags --
platforms might need re-tuning. Enabling SD_BALANCE_WAKE conditionally
on NUMA distance seems like a good additional feature; Magny-Cours and
small Nehalem systems would want this enabled, while systems with slow
interconnects would not.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
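As a rough illustration of the wake-time walk described above, the sketch
below climbs a toy sched-domain hierarchy upward from the waking CPU and
stops at the smallest domain that both allows wake balancing and spans the
CPU the task previously ran on -- roughly the point at which wake_affine()
would be consulted. This is a minimal stand-alone model, not the kernel's
implementation: the toy_sched_domain structure, the TOY_SD_* flag values
and the smallest_wake_domain() helper are invented for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for kernel structures and flags; illustrative only. */
#define TOY_SD_BALANCE_WAKE	0x01
#define TOY_SD_WAKE_AFFINE	0x02

struct toy_sched_domain {
	struct toy_sched_domain *parent;	/* next larger domain, NULL at top */
	uint64_t span;				/* bitmask of CPUs in this domain  */
	unsigned int flags;
};

static int cpu_in_span(uint64_t span, int cpu)
{
	return (span >> cpu) & 1;
}

/*
 * Walk up from the waking CPU's lowest domain and return the smallest
 * domain that allows wake balancing and spans both the waking CPU and
 * the CPU the task previously ran on.
 */
static struct toy_sched_domain *
smallest_wake_domain(struct toy_sched_domain *sd, int waking_cpu, int prev_cpu)
{
	for (; sd; sd = sd->parent) {
		if (!(sd->flags & TOY_SD_BALANCE_WAKE))
			continue;
		if (cpu_in_span(sd->span, waking_cpu) &&
		    cpu_in_span(sd->span, prev_cpu))
			return sd;
	}
	return NULL;
}

int main(void)
{
	/* Two SMT siblings (CPUs 0,1) under a core domain, CPUs 0-3 under a node. */
	struct toy_sched_domain node = { NULL, 0x0f, TOY_SD_BALANCE_WAKE };
	struct toy_sched_domain core = { &node, 0x03,
					 TOY_SD_BALANCE_WAKE | TOY_SD_WAKE_AFFINE };

	/* Task last ran on CPU 3, wakeup happens on CPU 0: the node domain wins. */
	struct toy_sched_domain *sd = smallest_wake_domain(&core, 0, 3);

	printf("smallest wake domain span: 0x%llx\n",
	       sd ? (unsigned long long)sd->span : 0ULL);
	return 0;
}

The real scheduler walk additionally takes cpu_power and the per-domain
load indices (wake_idx, busy_idx, ...) into account, which is where
per-platform templates such as the sparc64 SD_NODE_INIT below come in.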
#ifndef _ASM_SPARC64_TOPOLOGY_H
#define _ASM_SPARC64_TOPOLOGY_H

#ifdef CONFIG_NUMA

#include <asm/mmzone.h>

static inline int cpu_to_node(int cpu)
{
	return numa_cpu_lookup_table[cpu];
}

#define parent_node(node)	(node)

static inline cpumask_t node_to_cpumask(int node)
{
	return numa_cpumask_lookup_table[node];
}
#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])

/*
 * Returns a pointer to the cpumask of CPUs on Node 'node'.
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node)		\
		cpumask_t *v = &(numa_cpumask_lookup_table[node])

#define node_to_cpumask_ptr_next(v, node)	\
			   v = &(numa_cpumask_lookup_table[node])

struct pci_bus;
#ifdef CONFIG_PCI
extern int pcibus_to_node(struct pci_bus *pbus);
#else
static inline int pcibus_to_node(struct pci_bus *pbus)
{
	return -1;
}
#endif

#define cpumask_of_pcibus(bus)	\
	(pcibus_to_node(bus) == -1 ? \
	 cpu_all_mask : \
	 cpumask_of_node(pcibus_to_node(bus)))

#define SD_NODE_INIT (struct sched_domain) {		\
	.min_interval		= 8,			\
	.max_interval		= 32,			\
	.busy_factor		= 32,			\
	.imbalance_pct		= 125,			\
	.cache_nice_tries	= 2,			\
	.busy_idx		= 3,			\
	.idle_idx		= 2,			\
	.newidle_idx		= 0,			\
	.wake_idx		= 1,			\
	.forkexec_idx		= 1,			\
	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_FORK	\
				| SD_BALANCE_EXEC	\
				| SD_BALANCE_WAKE	\
				| SD_SERIALIZE,		\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
}

#else /* CONFIG_NUMA */

#include <asm-generic/topology.h>

#endif /* !(CONFIG_NUMA) */

#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).core_id)
#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
#define mc_capable()				(sparc64_multi_core)
#define smt_capable()				(sparc64_multi_core)
#endif /* CONFIG_SMP */

#define cpu_coregroup_mask(cpu)			(&cpu_core_map[cpu])

#endif /* _ASM_SPARC64_TOPOLOGY_H */