Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Fix stale cpufreq_cpu_governor pointer
[CPUFREQ] Resolve time unit thinko in ondemand/conservative govs
[CPUFREQ] speedstep-ich: fix error caused by 394122ab14
[CPUFREQ] Fix use after free on governor restore
[CPUFREQ] acpi-cpufreq: blacklist Intel 0f68: Fix HT detection and put in notification message
[CPUFREQ] powernow-k8: Fix test in get_transition_latency()
[CPUFREQ] longhaul: select Longhaul version 2 for capable CPUs
commit 66b00a7c93

7 changed files with 70 additions and 32 deletions
@@ -526,16 +526,22 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 
 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 {
-        /* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+        /* Intel Xeon Processor 7100 Series Specification Update
+         * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
          * AL30: A Machine Check Exception (MCE) Occurring during an
          * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-         * Both Processor Cores to Lock Up when HT is enabled*/
+         * Both Processor Cores to Lock Up. */
         if (c->x86_vendor == X86_VENDOR_INTEL) {
                 if ((c->x86 == 15) &&
                     (c->x86_model == 6) &&
-                    (c->x86_mask == 8) && smt_capable())
+                    (c->x86_mask == 8)) {
+                        printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+                            "Xeon(R) 7100 Errata AL30, processors may "
+                            "lock up on frequency changes: disabling "
+                            "acpi-cpufreq.\n");
                         return -ENODEV;
+                }
         }
         return 0;
 }
 #endif
@@ -549,13 +555,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         unsigned int result = 0;
         struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
         struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+        static int blacklisted;
+#endif
 
         dprintk("acpi_cpufreq_cpu_init\n");
 
 #ifdef CONFIG_SMP
-        result = acpi_cpufreq_blacklist(c);
-        if (result)
-                return result;
+        if (blacklisted)
+                return blacklisted;
+        blacklisted = acpi_cpufreq_blacklist(c);
+        if (blacklisted)
+                return blacklisted;
 #endif
 
         data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
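
Note: the hunk above caches the blacklist result in a function-local static so the check (and its log message) runs only once, even though the driver's ->init path is called for every CPU. A minimal user-space sketch of that pattern, with a hypothetical expensive_check() standing in for acpi_cpufreq_blacklist():

#include <stdio.h>

/* Hypothetical stand-in for acpi_cpufreq_blacklist(): pretend the
 * platform is blacklisted and the check prints a message. */
static int expensive_check(void)
{
        printf("running blacklist check (prints once)\n");
        return -19; /* -ENODEV */
}

static int driver_init(void)
{
        /* Function-local static: keeps its value across calls, so the
         * check and its message happen only on the first call. */
        static int blacklisted;

        if (blacklisted)
                return blacklisted;
        blacklisted = expensive_check();
        if (blacklisted)
                return blacklisted;

        return 0; /* normal init would continue here */
}

int main(void)
{
        /* init is invoked once per CPU; the message still appears once. */
        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu %d: init -> %d\n", cpu, driver_init());
        return 0;
}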
@@ -813,7 +813,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
                 memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
                 break;
         case 1 ... 15:
-                longhaul_version = TYPE_LONGHAUL_V1;
+                longhaul_version = TYPE_LONGHAUL_V2;
                 if (c->x86_mask < 8) {
                         cpu_model = CPU_SAMUEL2;
                         cpuname = "C3 'Samuel 2' [C5B]";
@@ -1022,7 +1022,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
          * set it to 1 to avoid problems in the future.
          * For all others it's a BIOS bug.
          */
-        if (!boot_cpu_data.x86 == 0x11)
+        if (boot_cpu_data.x86 != 0x11)
                 printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
                                 "latency\n");
         max_latency = 1;
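
Note: the powernow-k8 hunk is a classic operator-precedence bug. `!` binds tighter than `==`, so `!boot_cpu_data.x86 == 0x11` compares a 0/1 boolean against 0x11 and can never be true; the warning was never printed. A minimal user-space sketch of the difference (cpu_family is a hypothetical stand-in, not the kernel variable):

#include <stdio.h>

/* Hypothetical stand-in for boot_cpu_data.x86. */
static unsigned int cpu_family = 0x11;

int main(void)
{
        /* Buggy form: '!' is applied first, so the left-hand side is
         * always 0 or 1 and never equals 0x11, regardless of family. */
        if (!cpu_family == 0x11)
                printf("buggy check fired (never happens)\n");

        /* Fixed form: compare the family itself. */
        if (cpu_family != 0x11)
                printf("family is not 0x11, warn about zero latency\n");
        else
                printf("family 0x11: zero transition latency is expected\n");

        return 0;
}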
@@ -232,28 +232,23 @@ static unsigned int speedstep_detect_chipset(void)
         return 0;
 }
 
-struct get_freq_data {
-        unsigned int speed;
-        unsigned int processor;
-};
-
-static void get_freq_data(void *_data)
+static void get_freq_data(void *_speed)
 {
-        struct get_freq_data *data = _data;
+        unsigned int *speed = _speed;
 
-        data->speed = speedstep_get_frequency(data->processor);
+        *speed = speedstep_get_frequency(speedstep_processor);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-        struct get_freq_data data = { .processor = cpu };
+        unsigned int speed;
 
         /* You're supposed to ensure CPU is online. */
-        if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+        if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
                 BUG();
 
-        dprintk("detected %u kHz as current frequency\n", data.speed);
-        return data.speed;
+        dprintk("detected %u kHz as current frequency\n", speed);
+        return speed;
 }
 
 /**
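
Note: the speedstep-ich fix drops the wrapper struct and passes a plain `unsigned int *` through the `void *` argument of smp_call_function_single(), using the driver-global speedstep_processor instead of carrying it along. A user-space sketch of that void-pointer callback shape, using a pthread as a rough stand-in for running code on another CPU (measure_frequency() and its value are hypothetical):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for speedstep_get_frequency(). */
static unsigned int measure_frequency(void)
{
        return 1600000; /* kHz, made-up value for illustration */
}

/* Same shape as the fixed get_freq_data(): the void * argument is
 * simply a pointer to the caller's result variable. */
static void *get_freq_data(void *_speed)
{
        unsigned int *speed = _speed;

        *speed = measure_frequency();
        return NULL;
}

int main(void)
{
        unsigned int speed = 0;
        pthread_t t;

        /* Analogous to smp_call_function_single(cpu, get_freq_data,
         * &speed, 1): run the helper elsewhere, wait for it, then read
         * the result it stored through the pointer. */
        if (pthread_create(&t, NULL, get_freq_data, &speed) != 0)
                return 1;
        pthread_join(t, NULL);

        printf("detected %u kHz as current frequency\n", speed);
        return 0;
}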
@@ -41,7 +41,7 @@ static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
+static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
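
Note: this and the following cpufreq core hunks implement the use-after-free fix. The per-CPU record of an offlined CPU's governor is changed from a pointer into a copy of the governor's name: the pointer could outlive the governor module, while a name string stays valid and is looked up again (__find_governor) when the CPU comes back. A minimal user-space sketch of the idea, with hypothetical types and sizes rather than the kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_LEN 16 /* hypothetical stand-in for CPUFREQ_NAME_LEN */

struct governor {
        char name[NAME_LEN];
};

/* Saved state for an offlined CPU: a copy of the governor's name,
 * not a pointer to the governor object itself. */
static char saved_governor_name[NAME_LEN];

static void cpu_going_offline(struct governor *gov)
{
        /* Copy the name; safe even if 'gov' is freed later. */
        strncpy(saved_governor_name, gov->name, NAME_LEN);
}

/* Rough stand-in for __find_governor(): look the saved name up again
 * among the currently registered governors. */
static struct governor *find_governor(const char *name,
                                      struct governor **table, int n)
{
        for (int i = 0; i < n; i++)
                if (table[i] && !strcmp(table[i]->name, name))
                        return table[i];
        return NULL;
}

int main(void)
{
        struct governor *ondemand = malloc(sizeof(*ondemand));
        struct governor *registered[1] = { ondemand };

        strcpy(ondemand->name, "ondemand");
        cpu_going_offline(ondemand);

        /* The governor module goes away and its object is freed.  A saved
         * pointer would now dangle; the saved name is still usable. */
        free(ondemand);
        registered[0] = NULL;

        /* CPU comes back online: restore only if the governor is still
         * registered, otherwise fall back to a default. */
        struct governor *gov = find_governor(saved_governor_name,
                                             registered, 1);
        printf("restore %s\n", gov ? gov->name : "default governor");
        return 0;
}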
@@ -774,10 +774,12 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
 #ifdef CONFIG_SMP
         unsigned long flags;
         unsigned int j;
 
 #ifdef CONFIG_HOTPLUG_CPU
-        if (per_cpu(cpufreq_cpu_governor, cpu)) {
-                policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+        struct cpufreq_governor *gov;
+        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+        if (gov) {
+                policy->governor = gov;
                 dprintk("Restoring governor %s for cpu %d\n",
                                 policy->governor->name, cpu);
         }
@@ -949,10 +951,13 @@ int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
         unsigned int cpu = sys_dev->id;
-        int ret = 0;
+        int ret = 0, found = 0;
         struct cpufreq_policy *policy;
         unsigned long flags;
         unsigned int j;
+#ifdef CONFIG_HOTPLUG_CPU
+        int sibling;
+#endif
 
         if (cpu_is_offline(cpu))
                 return 0;
@@ -999,6 +1004,18 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         INIT_WORK(&policy->update, handle_update);
 
         /* Set governor before ->init, so that driver could check it */
+#ifdef CONFIG_HOTPLUG_CPU
+        for_each_online_cpu(sibling) {
+                struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+                if (cp && cp->governor &&
+                    (cpumask_test_cpu(cpu, cp->related_cpus))) {
+                        policy->governor = cp->governor;
+                        found = 1;
+                        break;
+                }
+        }
+#endif
+        if (!found)
                 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
         /* call driver. From then on the cpufreq must be able
          * to accept all calls to ->verify and ->setpolicy for this CPU
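
Note: the hunk above makes a newly added CPU inherit the governor of an already-online sibling that shares its policy (related_cpus), falling back to the default governor only when no such sibling exists. A small user-space sketch of that lookup, with hypothetical data structures in place of the per-CPU policy pointers:

#include <stdio.h>

#define NR_CPUS 4

struct policy {
        unsigned int related_mask; /* bit i set: cpu i shares this policy */
        const char *governor;
};

/* Hypothetical per-CPU policy pointers: CPUs 0 and 1 share one policy
 * that currently runs "conservative"; CPUs 2 and 3 have none yet. */
static struct policy shared01 = { .related_mask = 0x3, .governor = "conservative" };
static struct policy *cpu_policy[NR_CPUS] = { &shared01, &shared01, NULL, NULL };

static const char *pick_governor(int cpu)
{
        /* Scan online siblings: if one already has a governor for a
         * policy that covers this CPU, reuse it. */
        for (int sibling = 0; sibling < NR_CPUS; sibling++) {
                struct policy *cp = cpu_policy[sibling];

                if (cp && cp->governor && (cp->related_mask & (1u << cpu)))
                        return cp->governor;
        }
        /* No sibling found: fall back to the default governor. */
        return "ondemand";
}

int main(void)
{
        printf("cpu 1 gets: %s\n", pick_governor(1)); /* conservative */
        printf("cpu 3 gets: %s\n", pick_governor(3)); /* ondemand (default) */
        return 0;
}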
@@ -1111,7 +1128,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 #ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
-        per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
+        strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
+                        CPUFREQ_NAME_LEN);
 #endif
 
         /* if we have other CPUs still registered, we need to unlink them,
@@ -1135,7 +1153,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
                         continue;
                 dprintk("removing link for cpu %u\n", j);
 #ifdef CONFIG_HOTPLUG_CPU
-                per_cpu(cpufreq_cpu_governor, j) = data->governor;
+                strncpy(per_cpu(cpufreq_cpu_governor, j),
+                        data->governor->name, CPUFREQ_NAME_LEN);
 #endif
                 cpu_sys_dev = get_cpu_sysdev(j);
                 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1606,9 +1625,22 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
+#ifdef CONFIG_HOTPLUG_CPU
+        int cpu;
+#endif
+
         if (!governor)
                 return;
 
+#ifdef CONFIG_HOTPLUG_CPU
+        for_each_present_cpu(cpu) {
+                if (cpu_online(cpu))
+                        continue;
+                if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
+                        strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+        }
+#endif
+
         mutex_lock(&cpufreq_governor_mutex);
         list_del(&governor->governor_list);
         mutex_unlock(&cpufreq_governor_mutex);
@@ -116,9 +116,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 
         idle_time = cputime64_sub(cur_wall_time, busy_time);
         if (wall)
-                *wall = cur_wall_time;
+                *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-        return idle_time;
+        return (cputime64_t)jiffies_to_usecs(idle_time);;
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -133,9 +133,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 
         idle_time = cputime64_sub(cur_wall_time, busy_time);
         if (wall)
-                *wall = cur_wall_time;
+                *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-        return idle_time;
+        return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
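
Note: the last two hunks (conservative and ondemand governors) fix the "time unit thinko": the jiffy-based fallback returned raw jiffies while its callers, like the ktime-based path, work in microseconds, so idle/wall deltas were off by a factor of 1000000/HZ. A user-space sketch of the unit mismatch, assuming a hypothetical tick rate of HZ = 250:

#include <stdio.h>

#define HZ 250 /* hypothetical tick rate for illustration */

/* Convert jiffies to microseconds, as the kernel's jiffies_to_usecs()
 * would for this HZ. */
static unsigned long long jiffies_to_usecs(unsigned long long j)
{
        return j * (1000000ULL / HZ);
}

int main(void)
{
        unsigned long long wall_jiffies = 500; /* 2 s of wall time */
        unsigned long long busy_jiffies = 125; /* 0.5 s busy */
        unsigned long long idle_jiffies = wall_jiffies - busy_jiffies;

        /* Before the fix: raw jiffies handed to code expecting usecs,
         * i.e. 375 "microseconds" of idle time instead of 1.5 seconds. */
        printf("buggy: idle=%llu (interpreted as usecs)\n", idle_jiffies);

        /* After the fix: convert before returning. */
        printf("fixed: idle=%llu usecs, wall=%llu usecs\n",
               jiffies_to_usecs(idle_jiffies),
               jiffies_to_usecs(wall_jiffies));
        return 0;
}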