Merge branches 'pm-opp-fixes', 'pm-cpufreq-fixes' and 'pm-cpuidle-fixes'
* pm-opp-fixes:
  PM / OPP: Remove useless check

* pm-cpufreq-fixes:
  intel_pstate: Fix intel_pstate_get()
  cpufreq: intel_pstate: Fix HWP on boot CPU after system resume
  cpufreq: st: enable selective initialization based on the platform

* pm-cpuidle-fixes:
  ARM: cpuidle: Pass on arm_cpuidle_suspend()'s return value
commit 5f2f88e330
5 changed files with 38 additions and 23 deletions
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
         reg = opp_table->regulator;
         if (IS_ERR(reg)) {
                 /* Regulator may not be required for device */
-                if (reg)
-                        dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
-                                PTR_ERR(reg));
                 rcu_read_unlock();
                 return 0;
         }
@@ -1557,21 +1557,25 @@ void cpufreq_suspend(void)
         if (!cpufreq_driver)
                 return;
 
-        if (!has_target())
+        if (!has_target() && !cpufreq_driver->suspend)
                 goto suspend;
 
         pr_debug("%s: Suspending Governors\n", __func__);
 
         for_each_active_policy(policy) {
-                down_write(&policy->rwsem);
-                ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-                up_write(&policy->rwsem);
+                if (has_target()) {
+                        down_write(&policy->rwsem);
+                        ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                        up_write(&policy->rwsem);
 
-                if (ret)
-                        pr_err("%s: Failed to stop governor for policy: %p\n",
-                                __func__, policy);
-                else if (cpufreq_driver->suspend
-                    && cpufreq_driver->suspend(policy))
+                        if (ret) {
+                                pr_err("%s: Failed to stop governor for policy: %p\n",
+                                        __func__, policy);
+                                continue;
+                        }
+                }
+
+                if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
                         pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                 policy);
         }
@@ -1596,7 +1600,7 @@ void cpufreq_resume(void)
 
         cpufreq_suspended = false;
 
-        if (!has_target())
+        if (!has_target() && !cpufreq_driver->resume)
                 return;
 
         pr_debug("%s: Resuming Governors\n", __func__);
@@ -1605,7 +1609,7 @@ void cpufreq_resume(void)
                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                         pr_err("%s: Failed to resume driver: %p\n", __func__,
                                 policy);
-                } else {
+                } else if (has_target()) {
                         down_write(&policy->rwsem);
                         ret = cpufreq_start_governor(policy);
                         up_write(&policy->rwsem);
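
For context on the relaxed early returns in cpufreq_suspend() and cpufreq_resume() above: has_target() is false for setpolicy drivers such as intel_pstate, so the old checks skipped the loop entirely and the driver's ->suspend()/->resume() callbacks were never invoked. A rough sketch of the core helper, reconstructed from the cpufreq core of this era rather than taken from this diff:

/* Sketch only: has_target() as the cpufreq core roughly defines it. */
static inline bool has_target(void)
{
        /*
         * True only for drivers with a target/target_index interface;
         * setpolicy drivers such as intel_pstate make this false.
         */
        return cpufreq_driver->target_index || cpufreq_driver->target;
}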
@@ -453,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
         }
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+        if (hwp_active)
+                intel_pstate_hwp_set(policy->cpus);
+
+        return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
         get_online_cpus();
@@ -1062,8 +1070,9 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-        return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-                cpu->pstate.scaling, cpu->sample.mperf);
+        return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+                               int_tofp(cpu->pstate.max_pstate_physical *
+                                        cpu->pstate.scaling / 100)));
 }
 
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
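
The rewritten get_avg_frequency() computes the average frequency from the filtered core_pct_busy sample rather than from raw APERF/MPERF counts. For reference, the fixed-point helpers it relies on look roughly like this in the intel_pstate driver (reproduced from memory as a sketch, not part of this hunk):

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

/* Fixed-point multiply with FRAC_BITS fractional bits. */
static inline int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}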
@@ -1106,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
         int32_t core_busy, max_pstate, current_pstate, sample_ratio;
         u64 duration_ns;
 
-        intel_pstate_calc_busy(cpu);
-
         /*
          * core_busy is the ratio of actual performance to max
          * max_pstate is the max non turbo pstate available
@@ -1191,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
         if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                 bool sample_taken = intel_pstate_sample(cpu, time);
 
-                if (sample_taken && !hwp_active)
-                        intel_pstate_adjust_busy_pstate(cpu);
+                if (sample_taken) {
+                        intel_pstate_calc_busy(cpu);
+                        if (!hwp_active)
+                                intel_pstate_adjust_busy_pstate(cpu);
+                }
         }
 }
 
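
Moving intel_pstate_calc_busy() into the sampling path (and out of get_target_pstate_use_performance(), per the earlier hunk) keeps core_pct_busy fresh even when HWP is active, which is what the reported frequency depends on. The driver's ->get() callback is roughly the following sketch (reconstructed for context, not part of this diff):

/* Sketch of the cpufreq ->get() hook served by this driver. */
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
        struct cpudata *cpu = all_cpu_data[cpu_num];

        if (!cpu)
                return 0;

        /* Average frequency derived from the last sample's core_pct_busy. */
        return get_avg_frequency(cpu);
}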
@@ -1346,8 +1356,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
  out:
         intel_pstate_set_update_util_hook(policy->cpu);
 
-        if (hwp_active)
-                intel_pstate_hwp_set(policy->cpus);
+        intel_pstate_hwp_set_policy(policy);
 
         return 0;
 }
@@ -1411,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = {
         .flags = CPUFREQ_CONST_LOOPS,
         .verify = intel_pstate_verify_policy,
         .setpolicy = intel_pstate_set_policy,
+        .resume = intel_pstate_hwp_set_policy,
         .get = intel_pstate_get,
         .init = intel_pstate_cpu_init,
         .stop_cpu = intel_pstate_stop_cpu,
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
 {
         int ret;
 
+        if ((!of_machine_is_compatible("st,stih407")) &&
+                (!of_machine_is_compatible("st,stih410")))
+                return -ENODEV;
+
         ddata.cpu = get_cpu_device(0);
         if (!ddata.cpu) {
                 dev_err(ddata.cpu, "Failed to get device for CPU0\n");
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
                  * call the CPU ops suspend protocol with idle index as a
                  * parameter.
                  */
-                arm_cpuidle_suspend(idx);
+                ret = arm_cpuidle_suspend(idx);
 
                 cpu_pm_exit();
         }
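
For context on why the return value now needs to be captured: arm_enter_idle_state() translates a failed suspend into an error return instead of reporting the entered index. A sketch of the surrounding function, reconstructed from the driver rather than shown in this hunk:

/* Sketch of the enclosing idle-state entry path in the ARM cpuidle driver. */
static int arm_enter_idle_state(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int idx)
{
        int ret;

        if (!idx) {
                cpu_do_idle();
                return idx;
        }

        ret = cpu_pm_enter();
        if (!ret) {
                /*
                 * Pass idle state index to cpu_suspend which in turn will
                 * call the CPU ops suspend protocol with idle index as a
                 * parameter.
                 */
                ret = arm_cpuidle_suspend(idx);

                cpu_pm_exit();
        }

        /* A suspend failure is now reported as -1 instead of idx. */
        return ret ? -1 : idx;
}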