Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ][2/2] preregister support for powernow-k8
  [CPUFREQ][1/2] whitespace fix for powernow-k8
  [CPUFREQ] Update MAINTAINERS to reflect new mailing list.
  [CPUFREQ] Fix warning in elanfreq
  [CPUFREQ] Fix -Wshadow warning in conservative governor.
  [CPUFREQ] Remove EXPERIMENTAL annotation from VIA C7 powersaver kconfig.
commit 796aadeb1b
6 changed files with 89 additions and 54 deletions
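The headline change is the powernow-k8 preregister patch: the driver's per-CPU data no longer embeds struct acpi_processor_performance by value but points at a per-cpu allocation that is handed to acpi_processor_preregister_performance() once at module init. That lets the ACPI core evaluate the _PSD dependency information up front, so per-CPU init can later take the policy's cpumask from shared_cpu_map instead of guessing from the CPU family. A condensed sketch of the init-time part, distilled from the hunks below (error unwinding and the non-ACPI #else stubs are omitted):

#include <linux/percpu.h>
#include <acpi/processor.h>

/* One ACPI performance block per CPU, allocated once at driver init. */
static struct acpi_processor_performance *acpi_perf_data;
static int preregister_valid;

/* Called once, before any per-CPU init: lets the ACPI core evaluate _PSD
 * and fill in shared_type/shared_cpu_map for every CPU up front. */
static int powernow_k8_cpu_preinit_acpi(void)
{
	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data)
		return -ENODEV;

	if (acpi_processor_preregister_performance(acpi_perf_data))
		return -ENODEV;

	preregister_valid = 1;
	return 0;
}

Per-CPU init then assigns data->acpi_data = percpu_ptr(acpi_perf_data, cpu) before calling acpi_processor_register_performance(), and powernowk8_exit() releases the allocation with free_percpu(), as the hunks below show.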
@@ -1249,7 +1249,7 @@ S:	Maintained
 CPU FREQUENCY DRIVERS
 P:	Dave Jones
 M:	davej@codemonkey.org.uk
-L:	cpufreq@lists.linux.org.uk
+L:	cpufreq@vger.kernel.org
 W:	http://www.codemonkey.org.uk/projects/cpufreq/
 T:	git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
 S:	Maintained
@@ -235,9 +235,9 @@ config X86_LONGHAUL
 	  If in doubt, say N.
 
 config X86_E_POWERSAVER
-	tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)"
+	tristate "VIA C7 Enhanced PowerSaver"
 	select CPU_FREQ_TABLE
-	depends on X86_32 && EXPERIMENTAL
+	depends on X86_32
 	help
 	  This adds the CPUFreq driver for VIA C7 processors.
 
@@ -44,7 +44,7 @@ struct s_elan_multiplier {
  * It is important that the frequencies
  * are listed in ascending order here!
  */
-struct s_elan_multiplier elan_multiplier[] = {
+static struct s_elan_multiplier elan_multiplier[] = {
 	{1000,	0x02,	0x18},
 	{2000,	0x02,	0x10},
 	{4000,	0x02,	0x08},
@@ -66,7 +66,6 @@ static u32 find_freq_from_fid(u32 fid)
 	return 800 + (fid * 100);
 }
 
-
 /* Return a frequency in KHz, given an input fid */
 static u32 find_khz_freq_from_fid(u32 fid)
 {
@@ -78,7 +77,6 @@ static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 p
 	return data[pstate].frequency;
 }
 
-
 /* Return the vco fid for an input fid
  *
  * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -166,7 +164,6 @@ static void fidvid_msr_init(void)
 	wrmsr(MSR_FIDVID_CTL, lo, hi);
 }
 
-
 /* write the new fid value along with the other control fields to the msr */
 static int write_new_fid(struct powernow_k8_data *data, u32 fid)
 {
@@ -740,44 +737,63 @@ static int find_psb_table(struct powernow_k8_data *data)
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
-	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+	if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
 		return;
 
-	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
-	data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK;
-	data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
-	data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
-	data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK);
-	data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK;
+	data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
+	data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
+	data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+	data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
+	data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
+	data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
 }
 
+static struct acpi_processor_performance *acpi_perf_data;
+static int preregister_valid;
+
+static int powernow_k8_cpu_preinit_acpi(void)
+{
+	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+	if (!acpi_perf_data)
+		return -ENODEV;
+
+	if (acpi_processor_preregister_performance(acpi_perf_data))
+		return -ENODEV;
+	else
+		preregister_valid = 1;
+	return 0;
+}
+
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
 	int ret_val;
+	int cpu = 0;
 
-	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
+	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+	if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
 		return -EIO;
 	}
 
 	/* verify the data contained in the ACPI structures */
-	if (data->acpi_data.state_count <= 1) {
+	if (data->acpi_data->state_count <= 1) {
 		dprintk("No ACPI P-States\n");
 		goto err_out;
 	}
 
-	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-		(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+	if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+		(data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
 		dprintk("Invalid control/status registers (%x - %x)\n",
-			data->acpi_data.control_register.space_id,
-			data->acpi_data.status_register.space_id);
+			data->acpi_data->control_register.space_id,
+			data->acpi_data->status_register.space_id);
 		goto err_out;
 	}
 
 	/* fill in data->powernow_table */
 	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
-		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
+		* (data->acpi_data->state_count + 1)), GFP_KERNEL);
 	if (!powernow_table) {
 		dprintk("powernow_table memory alloc failure\n");
 		goto err_out;
@@ -790,12 +806,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	if (ret_val)
 		goto err_out_mem;
 
-	powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END;
-	powernow_table[data->acpi_data.state_count].index = 0;
+	powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
+	powernow_table[data->acpi_data->state_count].index = 0;
 	data->powernow_table = powernow_table;
 
 	/* fill in data */
-	data->numps = data->acpi_data.state_count;
+	data->numps = data->acpi_data->state_count;
 	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
@@ -803,16 +819,31 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
+	/* determine affinity, from ACPI if available */
+	if (preregister_valid) {
+		if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
+		    (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
+			data->starting_core_affinity = data->acpi_data->shared_cpu_map;
+		else
+			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
+	} else {
+		/* best guess from family if not */
+		if (cpu_family == CPU_HW_PSTATE)
+			data->starting_core_affinity = cpumask_of_cpu(data->cpu);
+		else
+			data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu);
+	}
+
 	return 0;
 
 err_out_mem:
 	kfree(powernow_table);
 
 err_out:
-	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	acpi_processor_unregister_performance(data->acpi_data, data->cpu);
 
 	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
-	data->acpi_data.state_count = 0;
+	data->acpi_data->state_count = 0;
 
 	return -ENODEV;
 }
@@ -824,10 +855,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 	rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
 	data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
 
-	for (i = 0; i < data->acpi_data.state_count; i++) {
+	for (i = 0; i < data->acpi_data->state_count; i++) {
 		u32 index;
 
-		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
+		index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
 		if (index > data->max_hw_pstate) {
 			printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
@@ -843,7 +874,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
 
 		powernow_table[i].index = index;
 
-		powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+		powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -852,16 +883,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 {
 	int i;
 	int cntlofreq = 0;
-	for (i = 0; i < data->acpi_data.state_count; i++) {
+	for (i = 0; i < data->acpi_data->state_count; i++) {
 		u32 fid;
 		u32 vid;
 
 		if (data->exttype) {
-			fid = data->acpi_data.states[i].status & EXT_FID_MASK;
-			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
+			fid = data->acpi_data->states[i].status & EXT_FID_MASK;
+			vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
 		} else {
-			fid = data->acpi_data.states[i].control & FID_MASK;
-			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
+			fid = data->acpi_data->states[i].control & FID_MASK;
+			vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
 		}
 
 		dprintk("   %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
@@ -902,10 +933,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 			cntlofreq = i;
 		}
 
-		if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
+		if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
 				powernow_table[i].frequency,
-				(unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
+				(unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
 			powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 			continue;
 		}
@@ -915,11 +946,12 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
-	if (data->acpi_data.state_count)
-		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	if (data->acpi_data->state_count)
+		acpi_processor_unregister_performance(data->acpi_data, data->cpu);
 }
 
 #else
+static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; }
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
@@ -1104,7 +1136,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
-	cpumask_t oldmask;
+	cpumask_t oldmask = CPU_MASK_ALL;
 	int rc;
 
 	if (!cpu_online(pol->cpu))
@@ -1177,10 +1209,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	/* run on any CPU again */
 	set_cpus_allowed_ptr(current, &oldmask);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
-	else
-		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
+	pol->cpus = data->starting_core_affinity;
 	data->available_cores = &(pol->cpus);
 
 	/* Take a crude guess here.
@@ -1303,6 +1332,7 @@ static int __cpuinit powernowk8_init(void)
 	}
 
 	if (supported_cpus == num_online_cpus()) {
+		powernow_k8_cpu_preinit_acpi();
 		printk(KERN_INFO PFX "Found %d %s "
 			"processors (%d cpu cores) (" VERSION ")\n",
 			num_online_nodes(),
@@ -1319,6 +1349,10 @@ static void __exit powernowk8_exit(void)
 	dprintk("exit\n");
 
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
+
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
+	free_percpu(acpi_perf_data);
+#endif
 }
 
 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
@@ -33,12 +33,13 @@ struct powernow_k8_data {
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 	/* the acpi table needs to be kept. it's only available if ACPI was
 	 * used to determine valid frequency/vid/fid states */
-	struct acpi_processor_performance acpi_data;
+	struct acpi_processor_performance *acpi_data;
 #endif
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
 	cpumask_t *available_cores;
+	cpumask_t starting_core_affinity;
 };
 
 
@@ -333,7 +333,7 @@ static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
 	unsigned int tmp_idle_ticks, total_idle_ticks;
-	unsigned int freq_step;
+	unsigned int freq_target;
 	unsigned int freq_down_sampling_rate;
 	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy;
@@ -383,13 +383,13 @@ static void dbs_check_cpu(int cpu)
 		if (this_dbs_info->requested_freq == policy->max)
 			return;
 
-		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_step == 0))
-			freq_step = 5;
+		if (unlikely(freq_target == 0))
+			freq_target = 5;
 
-		this_dbs_info->requested_freq += freq_step;
+		this_dbs_info->requested_freq += freq_target;
 		if (this_dbs_info->requested_freq > policy->max)
 			this_dbs_info->requested_freq = policy->max;
 
@@ -425,19 +425,19 @@ static void dbs_check_cpu(int cpu)
 	/*
 	 * if we are already at the lowest speed then break out early
 	 * or if we 'cannot' reduce the speed as the user might want
-	 * freq_step to be zero
+	 * freq_target to be zero
 	 */
 	if (this_dbs_info->requested_freq == policy->min
 			|| dbs_tuners_ins.freq_step == 0)
 		return;
 
-	freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+	freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 	/* max freq cannot be less than 100. But who knows.... */
-	if (unlikely(freq_step == 0))
-		freq_step = 5;
+	if (unlikely(freq_target == 0))
+		freq_target = 5;
 
-	this_dbs_info->requested_freq -= freq_step;
+	this_dbs_info->requested_freq -= freq_target;
 	if (this_dbs_info->requested_freq < policy->min)
 		this_dbs_info->requested_freq = policy->min;
 
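As a worked example of the conservative-governor arithmetic that the -Wshadow rename touches above (the local variable is now freq_target while the tunable keeps its freq_step name): the step is freq_step percent of policy->max, floored at 5 kHz, and the requested frequency is then clamped to the policy limits. A minimal stand-alone sketch of that calculation with hypothetical example values, not driver code:

#include <stdio.h>

/* Sketch of the conservative governor's step math (all values in kHz).
 * freq_step_pct mirrors the dbs_tuners_ins.freq_step tunable (percent). */
static unsigned int next_requested_freq(unsigned int requested,
					unsigned int policy_min,
					unsigned int policy_max,
					unsigned int freq_step_pct,
					int scale_up)
{
	unsigned int freq_target = (freq_step_pct * policy_max) / 100;

	/* max freq cannot be less than 100 kHz, but guard the zero case anyway */
	if (freq_target == 0)
		freq_target = 5;

	if (scale_up) {
		requested += freq_target;
		if (requested > policy_max)
			requested = policy_max;
	} else {
		/* guard the unsigned subtraction before clamping to policy_min */
		if (requested > policy_min + freq_target)
			requested -= freq_target;
		else
			requested = policy_min;
	}
	return requested;
}

int main(void)
{
	/* hypothetical 800 MHz..2 GHz policy with a 5% step: 1000000 -> 1100000 */
	printf("%u\n", next_requested_freq(1000000, 800000, 2000000, 5, 1));
	return 0;
}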