Merge branch 'pm-sleep'

* pm-sleep:
  ACPI / PM: Check low power idle constraints for debug only
  PM / s2idle: Rename platform operations structure
  PM / s2idle: Rename ->enter_freeze to ->enter_s2idle
  PM / s2idle: Rename freeze_state enum and related items
  PM / s2idle: Rename PM_SUSPEND_FREEZE to PM_SUSPEND_TO_IDLE
  ACPI / PM: Prefer suspend-to-idle over S3 on some systems
  platform/x86: intel-hid: Wake up Dell Latitude 7275 from suspend-to-idle
  PM / suspend: Define pr_fmt() in suspend.c
  PM / suspend: Use mem_sleep_labels[] strings in messages
  PM / sleep: Put pm_test under CONFIG_PM_SLEEP_DEBUG
  PM / sleep: Check pm_wakeup_pending() in __device_suspend_noirq()
  PM / core: Add error argument to dpm_show_time()
  PM / core: Split dpm_suspend_noirq() and dpm_resume_noirq()
  PM / s2idle: Rearrange the main suspend-to-idle loop
  PM / timekeeping: Print debug messages when requested
  PM / sleep: Mark suspend/hibernation start and finish
  PM / sleep: Do not print debug messages by default
  PM / suspend: Export pm_suspend_target_state
Rafael J. Wysocki 2017-09-04 00:06:02 +02:00
commit 7b01463e51
22 changed files with 622 additions and 291 deletions


@ -273,3 +273,15 @@ Description:
This output is useful for system wakeup diagnostics of spurious
wakeup interrupts.
What: /sys/power/pm_debug_messages
Date: July 2017
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_debug_messages file controls the printing
of debug messages from the system suspend/hibernation
infrastructure to the kernel log.
Writing a "1" to this file enables the debug messages and
writing a "0" (default) to it disables them. Reads from
this file return the current value.
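As an aside (not part of the change above), the attribute can be exercised from user space; a minimal sketch in C, assuming the file exists on the running kernel and the caller has write permission:

#include <stdio.h>

/* Illustrative sketch: enable (1) or disable (0) PM debug messages. */
static int set_pm_debug_messages(int enable)
{
	FILE *f = fopen("/sys/power/pm_debug_messages", "w");

	if (!f)
		return -1;	/* attribute absent or insufficient permissions */

	/* The attribute accepts "1" to enable and "0" to disable. */
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	return set_pm_debug_messages(1) ? 1 : 0;
}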


@ -35,7 +35,9 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
The default suspend mode (ie. the one to be used without writing anything into
/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
"s2idle", but it can be overridden by the value of the "mem_sleep_default"
parameter in the kernel command line.
parameter in the kernel command line. On some ACPI-based systems, depending on
the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
is supported.
The properties of all of the sleep states are described below.
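As an aside on the default-mode paragraph above (not part of the change), the mode that will actually be used can be checked from user space by reading /sys/power/mem_sleep, which lists the supported modes with the selected one in square brackets; a minimal sketch in C:

#include <stdio.h>

/* Illustrative sketch: prints e.g. "s2idle [deep]" or "[s2idle] deep". */
int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/power/mem_sleep", "r");

	if (!f)
		return 1;	/* CONFIG_SUSPEND not set or file not present */

	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);

	fclose(f);
	return 0;
}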


@ -60,7 +60,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
return index;
}
static void tegra114_idle_enter_freeze(struct cpuidle_device *dev,
static void tegra114_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
@ -77,7 +77,7 @@ static struct cpuidle_driver tegra_idle_driver = {
#ifdef CONFIG_PM_SLEEP
[1] = {
.enter = tegra114_idle_power_down,
.enter_freeze = tegra114_idle_enter_freeze,
.enter_s2idle = tegra114_idle_enter_s2idle,
.exit_latency = 500,
.target_residency = 1000,
.flags = CPUIDLE_FLAG_TIMER_STOP,


@ -791,7 +791,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@ -876,14 +876,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
drv->safe_state_index = count;
}
/*
* Halt-induced C1 is not good for ->enter_freeze, because it
* Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
* particularly interesting from the suspend-to-idle angle, so
* avoid C1 and the situations in which we may need to fall back
* to it altogether.
*/
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
state->enter_freeze = acpi_idle_enter_freeze;
state->enter_s2idle = acpi_idle_enter_s2idle;
count++;
if (count == CPUIDLE_STATE_MAX)


@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
#define ACPI_LPS0_SCREEN_OFF 3
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
/* Device constraint entry structure */
struct lpi_device_info {
char *name;
int enabled;
union acpi_object *package;
};
/* Constraint package structure */
struct lpi_device_constraint {
int uid;
int min_dstate;
int function_states;
};
struct lpi_constraints {
acpi_handle handle;
int min_dstate;
};
static struct lpi_constraints *lpi_constraints_table;
static int lpi_constraints_table_size;
static void lpi_device_get_constraints(void)
{
union acpi_object *out_obj;
int i;
out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
NULL, ACPI_TYPE_PACKAGE);
acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
out_obj ? "successful" : "failed");
if (!out_obj)
return;
lpi_constraints_table = kcalloc(out_obj->package.count,
sizeof(*lpi_constraints_table),
GFP_KERNEL);
if (!lpi_constraints_table)
goto free_acpi_buffer;
acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
for (i = 0; i < out_obj->package.count; i++) {
struct lpi_constraints *constraint;
acpi_status status;
union acpi_object *package = &out_obj->package.elements[i];
struct lpi_device_info info = { };
int package_count = 0, j;
if (!package)
continue;
for (j = 0; j < package->package.count; ++j) {
union acpi_object *element =
&(package->package.elements[j]);
switch (element->type) {
case ACPI_TYPE_INTEGER:
info.enabled = element->integer.value;
break;
case ACPI_TYPE_STRING:
info.name = element->string.pointer;
break;
case ACPI_TYPE_PACKAGE:
package_count = element->package.count;
info.package = element->package.elements;
break;
}
}
if (!info.enabled || !info.package || !info.name)
continue;
constraint = &lpi_constraints_table[lpi_constraints_table_size];
status = acpi_get_handle(NULL, info.name, &constraint->handle);
if (ACPI_FAILURE(status))
continue;
acpi_handle_debug(lps0_device_handle,
"index:%d Name:%s\n", i, info.name);
constraint->min_dstate = -1;
for (j = 0; j < package_count; ++j) {
union acpi_object *info_obj = &info.package[j];
union acpi_object *cnstr_pkg;
union acpi_object *obj;
struct lpi_device_constraint dev_info;
switch (info_obj->type) {
case ACPI_TYPE_INTEGER:
/* version */
break;
case ACPI_TYPE_PACKAGE:
if (info_obj->package.count < 2)
break;
cnstr_pkg = info_obj->package.elements;
obj = &cnstr_pkg[0];
dev_info.uid = obj->integer.value;
obj = &cnstr_pkg[1];
dev_info.min_dstate = obj->integer.value;
acpi_handle_debug(lps0_device_handle,
"uid:%d min_dstate:%s\n",
dev_info.uid,
acpi_power_state_string(dev_info.min_dstate));
constraint->min_dstate = dev_info.min_dstate;
break;
}
}
if (constraint->min_dstate < 0) {
acpi_handle_debug(lps0_device_handle,
"Incomplete constraint defined\n");
continue;
}
lpi_constraints_table_size++;
}
acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
free_acpi_buffer:
ACPI_FREE(out_obj);
}
static void lpi_check_constraints(void)
{
int i;
for (i = 0; i < lpi_constraints_table_size; ++i) {
struct acpi_device *adev;
if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
continue;
acpi_handle_debug(adev->handle,
"LPI: required min power state:%s current power state:%s\n",
acpi_power_state_string(lpi_constraints_table[i].min_dstate),
acpi_power_state_string(adev->power.state));
if (!adev->flags.power_manageable) {
acpi_handle_info(adev->handle, "LPI: Device not power manageable\n");
continue;
}
if (adev->power.state < lpi_constraints_table[i].min_dstate)
acpi_handle_info(adev->handle,
"LPI: Constraint not met; min power state:%s current power state:%s\n",
acpi_power_state_string(lpi_constraints_table[i].min_dstate),
acpi_power_state_string(adev->power.state));
}
}
static void acpi_sleep_run_lps0_dsm(unsigned int func)
{
union acpi_object *out_obj;
@ -714,6 +875,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
/*
* Use suspend-to-idle by default if the default
* suspend mode was not set from the command line.
*/
if (mem_sleep_default > PM_SUSPEND_MEM)
mem_sleep_current = PM_SUSPEND_TO_IDLE;
}
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
@ -723,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev,
"_DSM function 0 evaluation failed\n");
}
ACPI_FREE(out_obj);
lpi_device_get_constraints();
return 0;
}
@ -731,14 +901,14 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
static int acpi_freeze_begin(void)
static int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
s2idle_in_progress = true;
return 0;
}
static int acpi_freeze_prepare(void)
static int acpi_s2idle_prepare(void)
{
if (lps0_device_handle) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
@ -758,8 +928,12 @@ static int acpi_freeze_prepare(void)
return 0;
}
static void acpi_freeze_wake(void)
static void acpi_s2idle_wake(void)
{
if (pm_debug_messages_on)
lpi_check_constraints();
/*
* If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
* that the SCI has triggered while suspended, so cancel the wakeup in
@ -772,7 +946,7 @@ static void acpi_freeze_wake(void)
}
}
static void acpi_freeze_sync(void)
static void acpi_s2idle_sync(void)
{
/*
* Process all pending events in case there are any wakeup ones.
@ -785,7 +959,7 @@ static void acpi_freeze_sync(void)
s2idle_wakeup = false;
}
static void acpi_freeze_restore(void)
static void acpi_s2idle_restore(void)
{
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
@ -798,19 +972,19 @@ static void acpi_freeze_restore(void)
}
}
static void acpi_freeze_end(void)
static void acpi_s2idle_end(void)
{
s2idle_in_progress = false;
acpi_scan_lock_release();
}
static const struct platform_freeze_ops acpi_freeze_ops = {
.begin = acpi_freeze_begin,
.prepare = acpi_freeze_prepare,
.wake = acpi_freeze_wake,
.sync = acpi_freeze_sync,
.restore = acpi_freeze_restore,
.end = acpi_freeze_end,
static const struct platform_s2idle_ops acpi_s2idle_ops = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
.wake = acpi_s2idle_wake,
.sync = acpi_s2idle_sync,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
static void acpi_sleep_suspend_setup(void)
@ -825,7 +999,7 @@ static void acpi_sleep_suspend_setup(void)
&acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_scan_add_handler(&lps0_handler);
freeze_set_ops(&acpi_freeze_ops);
s2idle_set_ops(&acpi_s2idle_ops);
}
#else /* !CONFIG_SUSPEND */


@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
dev_name(dev), pm_verb(state.event), info, error);
}
#ifdef CONFIG_PM_DEBUG
static void dpm_show_time(ktime_t starttime, pm_message_t state,
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
const char *info)
{
ktime_t calltime;
@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state,
usecs = usecs64;
if (usecs == 0)
usecs = 1;
pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
info ?: "", info ? " " : "", pm_verb(state.event),
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
info ?: "", info ? " " : "", pm_verb(state.event),
error ? "aborted" : "complete",
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
#else
static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
const char *info) {}
#endif /* CONFIG_PM_DEBUG */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
pm_message_t state, const char *info)
@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
put_device(dev);
}
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Call the "noirq" resume handlers for all devices in dpm_noirq_list and
* enable device drivers to receive interrupts.
*/
void dpm_resume_noirq(pm_message_t state)
void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
dpm_show_time(starttime, state, 0, "noirq");
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
void dpm_noirq_end(void)
{
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
* allow device drivers' interrupt handlers to be called.
*/
void dpm_resume_noirq(pm_message_t state)
{
dpm_noirq_resume_devices(state);
dpm_noirq_end();
}
/**
@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, "early");
dpm_show_time(starttime, state, 0, "early");
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
goto Complete;
}
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev)
return __device_suspend_noirq(dev, pm_transition, false);
}
/**
* dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
void dpm_noirq_begin(void)
{
cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
}
int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
@ -1208,14 +1217,31 @@ int dpm_suspend_noirq(pm_message_t state)
if (error) {
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_resume_noirq(resume_event(state));
} else {
dpm_show_time(starttime, state, "noirq");
}
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
/**
* dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers' interrupt handlers from being called and invoke
* "noirq" suspend callbacks for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
{
int ret;
dpm_noirq_begin();
ret = dpm_noirq_suspend_devices(state);
if (ret)
dpm_resume_noirq(resume_event(state));
return ret;
}
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state)
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
} else {
dpm_show_time(starttime, state, "late");
}
dpm_show_time(starttime, state, error, "late");
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state)
if (error) {
suspend_stats.failed_suspend++;
dpm_save_failed_step(SUSPEND_SUSPEND);
} else
dpm_show_time(starttime, state, NULL);
}
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}


@ -865,7 +865,7 @@ bool pm_wakeup_pending(void)
void pm_system_wakeup(void)
{
atomic_inc(&pm_abort_suspend);
freeze_wake();
s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);


@ -77,7 +77,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
unsigned int max_latency,
unsigned int forbidden_flags,
bool freeze)
bool s2idle)
{
unsigned int latency_req = 0;
int i, ret = 0;
@ -89,7 +89,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
if (s->disabled || su->disable || s->exit_latency <= latency_req
|| s->exit_latency > max_latency
|| (s->flags & forbidden_flags)
|| (freeze && !s->enter_freeze))
|| (s2idle && !s->enter_s2idle))
continue;
latency_req = s->exit_latency;
@ -128,7 +128,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
static void enter_s2idle_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
/*
@ -143,7 +143,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
drv->states[index].enter_freeze(dev, drv, index);
drv->states[index].enter_s2idle(dev, drv, index);
WARN_ON(!irqs_disabled());
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
@ -155,25 +155,25 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
}
/**
* cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
* cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
*
* If there are states with the ->enter_freeze callback, find the deepest of
* If there are states with the ->enter_s2idle callback, find the deepest of
* them and enter it with frozen tick.
*/
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
int index;
/*
* Find the deepest state with ->enter_freeze present, which guarantees
* Find the deepest state with ->enter_s2idle present, which guarantees
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
if (index > 0)
enter_freeze_proper(drv, dev, index);
enter_s2idle_proper(drv, dev, index);
return index;
}


@ -41,9 +41,9 @@ static int init_state_node(struct cpuidle_state *idle_state,
/*
* Since this is not a "coupled" state, it's safe to assume interrupts
* won't be enabled when it exits allowing the tick to be frozen
* safely. So enter() can be also enter_freeze() callback.
* safely. So enter() can be also enter_s2idle() callback.
*/
idle_state->enter_freeze = match_id->data;
idle_state->enter_s2idle = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);


@ -97,7 +97,7 @@ static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static void intel_idle_freeze(struct cpuidle_device *dev,
static void intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static struct cpuidle_state *cpuidle_state_table;
@ -132,7 +132,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 3,
.target_residency = 6,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -140,7 +140,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -148,7 +148,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -169,7 +169,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -177,7 +177,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -185,7 +185,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 80,
.target_residency = 211,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -193,7 +193,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 104,
.target_residency = 345,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x30",
@ -201,7 +201,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 109,
.target_residency = 345,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -214,7 +214,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
.desc = "MWAIT 0x58",
@ -222,7 +222,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 300,
.target_residency = 275,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
.desc = "MWAIT 0x52",
@ -230,7 +230,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 500,
.target_residency = 560,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x60",
@ -238,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 1200,
.target_residency = 4000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
.desc = "MWAIT 0x64",
@ -246,7 +246,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 10000,
.target_residency = 20000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -259,7 +259,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
.desc = "MWAIT 0x58",
@ -267,7 +267,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 80,
.target_residency = 275,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
.desc = "MWAIT 0x52",
@ -275,7 +275,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 200,
.target_residency = 560,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x60",
@ -283,7 +283,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 1200,
.target_residency = 4000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
.desc = "MWAIT 0x64",
@ -291,7 +291,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 10000,
.target_residency = 20000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -304,7 +304,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -312,7 +312,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -320,7 +320,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 59,
.target_residency = 156,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -328,7 +328,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 80,
.target_residency = 300,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x30",
@ -336,7 +336,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 87,
.target_residency = 300,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -349,7 +349,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -357,7 +357,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 10,
.target_residency = 80,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -365,7 +365,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 59,
.target_residency = 156,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -373,7 +373,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 82,
.target_residency = 300,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -386,7 +386,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -394,7 +394,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 10,
.target_residency = 250,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -402,7 +402,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 59,
.target_residency = 300,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -410,7 +410,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 84,
.target_residency = 400,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -423,7 +423,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -431,7 +431,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 10,
.target_residency = 500,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -439,7 +439,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 59,
.target_residency = 600,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -447,7 +447,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 88,
.target_residency = 700,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -460,7 +460,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -468,7 +468,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -476,7 +476,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 33,
.target_residency = 100,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -484,7 +484,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 133,
.target_residency = 400,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x32",
@ -492,7 +492,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 166,
.target_residency = 500,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@ -500,7 +500,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 300,
.target_residency = 900,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@ -508,7 +508,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 600,
.target_residency = 1800,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@ -516,7 +516,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 2600,
.target_residency = 7700,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -528,7 +528,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -536,7 +536,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -544,7 +544,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 40,
.target_residency = 100,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -552,7 +552,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 133,
.target_residency = 400,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x32",
@ -560,7 +560,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 166,
.target_residency = 500,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@ -568,7 +568,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 300,
.target_residency = 900,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@ -576,7 +576,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 600,
.target_residency = 1800,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@ -584,7 +584,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 2600,
.target_residency = 7700,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -597,7 +597,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -605,7 +605,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@ -613,7 +613,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 70,
.target_residency = 100,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -621,7 +621,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x33",
@ -629,7 +629,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 124,
.target_residency = 800,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@ -637,7 +637,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@ -645,7 +645,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 480,
.target_residency = 5000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@ -653,7 +653,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 890,
.target_residency = 5000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -666,7 +666,7 @@ static struct cpuidle_state skx_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -674,7 +674,7 @@ static struct cpuidle_state skx_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -682,7 +682,7 @@ static struct cpuidle_state skx_cstates[] = {
.exit_latency = 133,
.target_residency = 600,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -695,7 +695,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C2",
.desc = "MWAIT 0x10",
@ -703,7 +703,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
.desc = "MWAIT 0x30",
@ -711,7 +711,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x52",
@ -719,7 +719,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -731,7 +731,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 1,
.target_residency = 4,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
.desc = "MWAIT 0x30",
@ -739,7 +739,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x52",
@ -747,7 +747,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x60",
@ -755,7 +755,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 1200,
.target_residency = 4000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x64",
@ -763,7 +763,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 10000,
.target_residency = 20000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -775,7 +775,7 @@ static struct cpuidle_state avn_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x51",
@ -783,7 +783,7 @@ static struct cpuidle_state avn_cstates[] = {
.exit_latency = 15,
.target_residency = 45,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -795,7 +795,7 @@ static struct cpuidle_state knl_cstates[] = {
.exit_latency = 1,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze },
.enter_s2idle = intel_idle_s2idle },
{
.name = "C6",
.desc = "MWAIT 0x10",
@ -803,7 +803,7 @@ static struct cpuidle_state knl_cstates[] = {
.exit_latency = 120,
.target_residency = 500,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze },
.enter_s2idle = intel_idle_s2idle },
{
.enter = NULL }
};
@ -816,7 +816,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -824,7 +824,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -832,7 +832,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 133,
.target_residency = 133,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x31",
@ -840,7 +840,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 155,
.target_residency = 155,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@ -848,7 +848,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 1000,
.target_residency = 1000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@ -856,7 +856,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 2000,
.target_residency = 2000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@ -864,7 +864,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 10000,
.target_residency = 10000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -877,7 +877,7 @@ static struct cpuidle_state dnv_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@ -885,7 +885,7 @@ static struct cpuidle_state dnv_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@ -893,7 +893,7 @@ static struct cpuidle_state dnv_cstates[] = {
.exit_latency = 50,
.target_residency = 500,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@ -936,12 +936,12 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
}
/**
* intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle
* intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle
* @dev: cpuidle_device
* @drv: cpuidle driver
* @index: state index
*/
static void intel_idle_freeze(struct cpuidle_device *dev,
static void intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
@ -1338,7 +1338,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
int num_substates, mwait_hint, mwait_cstate;
if ((cpuidle_state_table[cstate].enter == NULL) &&
(cpuidle_state_table[cstate].enter_freeze == NULL))
(cpuidle_state_table[cstate].enter_s2idle == NULL))
break;
if (cstate + 1 > max_cstate) {


@ -203,15 +203,26 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
acpi_status status;
if (priv->wakeup_mode) {
/*
* Needed for wakeup from suspend-to-idle to work on some
* platforms that don't expose the 5-button array, but still
* send notifies with the power button event code to this
* device object on power button actions while suspended.
*/
if (event == 0xce)
goto wakeup;
/* Wake up on 5-button array events only. */
if (event == 0xc0 || !priv->array)
return;
if (sparse_keymap_entry_from_scancode(priv->array, event))
pm_wakeup_hard_event(&device->dev);
else
if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
wakeup:
pm_wakeup_hard_event(&device->dev);
return;
}


@ -150,7 +150,7 @@ static void of_get_regulation_constraints(struct device_node *np,
suspend_state = &constraints->state_disk;
break;
case PM_SUSPEND_ON:
case PM_SUSPEND_FREEZE:
case PM_SUSPEND_TO_IDLE:
case PM_SUSPEND_STANDBY:
default:
continue;


@ -52,11 +52,11 @@ struct cpuidle_state {
int (*enter_dead) (struct cpuidle_device *dev, int index);
/*
* CPUs execute ->enter_freeze with the local tick or entire timekeeping
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
*/
void (*enter_freeze) (struct cpuidle_device *dev,
void (*enter_s2idle) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index);
};
@ -198,14 +198,14 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)


@ -689,6 +689,8 @@ struct dev_pm_domain {
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_noirq_resume_devices(pm_message_t state);
extern void dpm_noirq_end(void);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
@ -697,6 +699,8 @@ extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern void dpm_noirq_begin(void);
extern int dpm_noirq_suspend_devices(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);


@ -33,10 +33,10 @@ static inline void pm_restore_console(void)
typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1)
#define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE
#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
enum suspend_stat_step {
@ -186,7 +186,7 @@ struct platform_suspend_ops {
void (*recover)(void);
};
struct platform_freeze_ops {
struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
void (*wake)(void);
@ -196,6 +196,9 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
@ -234,22 +237,22 @@ static inline bool pm_resume_via_firmware(void)
}
/* Suspend-to-idle state machine. */
enum freeze_state {
FREEZE_STATE_NONE, /* Not suspended/suspending. */
FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */
FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */
enum s2idle_states {
S2IDLE_STATE_NONE, /* Not suspended/suspending. */
S2IDLE_STATE_ENTER, /* Enter suspend-to-idle. */
S2IDLE_STATE_WAKE, /* Wake up from suspend-to-idle. */
};
extern enum freeze_state __read_mostly suspend_freeze_state;
extern enum s2idle_states __read_mostly s2idle_state;
static inline bool idle_should_freeze(void)
static inline bool idle_should_enter_s2idle(void)
{
return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
extern void __init pm_states_init(void);
extern void freeze_set_ops(const struct platform_freeze_ops *ops);
extern void freeze_wake(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);
/**
* arch_suspend_disable_irqs - disable IRQs for suspend
@ -281,10 +284,10 @@ static inline bool pm_resume_via_firmware(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool idle_should_freeze(void) { return false; }
static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
static inline void freeze_wake(void) {}
static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
/* struct pbe is used for creating lists of pages that should be restored
@ -427,6 +430,7 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
extern unsigned int pm_wakeup_irq;
extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
@ -491,10 +495,24 @@ static inline void unlock_system_sleep(void) {}
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
#else
#define pm_print_times_enabled (false)
#define pm_debug_messages_on (false)
#include <linux/printk.h>
#define __pm_pr_dbg(defer, fmt, ...) \
no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
#define pm_pr_dbg(fmt, ...) \
__pm_pr_dbg(false, fmt, ##__VA_ARGS__)
#define pm_deferred_pr_dbg(fmt, ...) \
__pm_pr_dbg(true, fmt, ##__VA_ARGS__)
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */


@ -651,7 +651,7 @@ static int load_image_and_restore(void)
int error;
unsigned int flags;
pr_debug("Loading hibernation image.\n");
pm_pr_dbg("Loading hibernation image.\n");
lock_device_hotplug();
error = create_basic_memory_bitmaps();
@ -681,7 +681,7 @@ int hibernate(void)
bool snapshot_test = false;
if (!hibernation_available()) {
pr_debug("Hibernation not available.\n");
pm_pr_dbg("Hibernation not available.\n");
return -EPERM;
}
@ -692,6 +692,7 @@ int hibernate(void)
goto Unlock;
}
pr_info("hibernation entry\n");
pm_prepare_console();
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
if (error) {
@ -727,7 +728,7 @@ int hibernate(void)
else
flags |= SF_CRC32_MODE;
pr_debug("Writing image.\n");
pm_pr_dbg("Writing image.\n");
error = swsusp_write(flags);
swsusp_free();
if (!error) {
@ -739,7 +740,7 @@ int hibernate(void)
in_suspend = 0;
pm_restore_gfp_mask();
} else {
pr_debug("Image restored successfully.\n");
pm_pr_dbg("Image restored successfully.\n");
}
Free_bitmaps:
@ -747,7 +748,7 @@ int hibernate(void)
Thaw:
unlock_device_hotplug();
if (snapshot_test) {
pr_debug("Checking hibernation image\n");
pm_pr_dbg("Checking hibernation image\n");
error = swsusp_check();
if (!error)
error = load_image_and_restore();
@ -762,6 +763,8 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
pr_info("hibernation exit\n");
return error;
}
@ -811,7 +814,7 @@ static int software_resume(void)
goto Unlock;
}
pr_debug("Checking hibernation image partition %s\n", resume_file);
pm_pr_dbg("Checking hibernation image partition %s\n", resume_file);
if (resume_delay) {
pr_info("Waiting %dsec before reading resume device ...\n",
@ -853,10 +856,10 @@ static int software_resume(void)
}
Check_image:
pr_debug("Hibernation image partition %d:%d present\n",
pm_pr_dbg("Hibernation image partition %d:%d present\n",
MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
pr_debug("Looking for hibernation image.\n");
pm_pr_dbg("Looking for hibernation image.\n");
error = swsusp_check();
if (error)
goto Unlock;
@ -868,6 +871,7 @@ static int software_resume(void)
goto Unlock;
}
pr_info("resume from hibernation\n");
pm_prepare_console();
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
if (error) {
@ -875,7 +879,7 @@ static int software_resume(void)
goto Close_Finish;
}
pr_debug("Preparing processes for restore.\n");
pm_pr_dbg("Preparing processes for restore.\n");
error = freeze_processes();
if (error)
goto Close_Finish;
@ -884,11 +888,12 @@ static int software_resume(void)
Finish:
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
pm_restore_console();
pr_info("resume from hibernation failed (%d)\n", error);
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
Unlock:
mutex_unlock(&pm_mutex);
pr_debug("Hibernation image not present or could not be loaded.\n");
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
swsusp_close(FMODE_READ);
@ -1012,8 +1017,8 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
error = -EINVAL;
if (!error)
pr_debug("Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
pm_pr_dbg("Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
unlock_system_sleep();
return error ? error : n;
}


@ -150,7 +150,7 @@ static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr
power_attr(mem_sleep);
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_PM_DEBUG
#ifdef CONFIG_PM_SLEEP_DEBUG
int pm_test_level = TEST_NONE;
static const char * const pm_tests[__TEST_AFTER_LAST] = {
@ -211,7 +211,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
}
power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
#endif /* CONFIG_PM_SLEEP_DEBUG */
#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
@ -361,6 +361,61 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
power_attr_ro(pm_wakeup_irq);
bool pm_debug_messages_on __read_mostly;
static ssize_t pm_debug_messages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", pm_debug_messages_on);
}
static ssize_t pm_debug_messages_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
pm_debug_messages_on = !!val;
return n;
}
power_attr(pm_debug_messages);
/**
* __pm_pr_dbg - Print a suspend debug message to the kernel log.
* @defer: Whether or not to use printk_deferred() to print the message.
* @fmt: Message format.
*
* The message will be emitted if enabled through the pm_debug_messages
* sysfs attribute.
*/
void __pm_pr_dbg(bool defer, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!pm_debug_messages_on)
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (defer)
printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
else
printk(KERN_DEBUG "PM: %pV", &vaf);
va_end(args);
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
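As a hedged illustration of how the helper above is meant to be consumed (the driver helper and device name here are hypothetical, not taken from this change), a caller would normally go through the pm_pr_dbg() wrapper rather than __pm_pr_dbg() directly:

#include <linux/suspend.h>

/* Hypothetical driver helper: the message reaches the kernel log only
 * when /sys/power/pm_debug_messages has been set to 1. */
static void __maybe_unused example_report_suspend(const char *devname, int error)
{
	if (error)
		pm_pr_dbg("%s: suspend failed, error %d\n", devname, error);
	else
		pm_pr_dbg("%s: suspended\n", devname);
}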
@ -691,12 +746,11 @@ static struct attribute * g[] = {
&wake_lock_attr.attr,
&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
&pm_test_attr.attr,
&pm_print_times_attr.attr,
&pm_wakeup_irq_attr.attr,
&pm_debug_messages_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER


@ -192,7 +192,6 @@ extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];
extern suspend_state_t mem_sleep_current;
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
@ -245,7 +244,11 @@ enum {
#define TEST_FIRST TEST_NONE
#define TEST_MAX (__TEST_AFTER_LAST - 1)
#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level (TEST_NONE)
#endif
#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)


@ -8,6 +8,8 @@
* This file is released under the GPLv2.
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
@ -33,53 +35,55 @@
#include "power.h"
const char * const pm_labels[] = {
[PM_SUSPEND_FREEZE] = "freeze",
[PM_SUSPEND_TO_IDLE] = "freeze",
[PM_SUSPEND_STANDBY] = "standby",
[PM_SUSPEND_MEM] = "mem",
};
const char *pm_states[PM_SUSPEND_MAX];
static const char * const mem_sleep_labels[] = {
[PM_SUSPEND_FREEZE] = "s2idle",
[PM_SUSPEND_TO_IDLE] = "s2idle",
[PM_SUSPEND_STANDBY] = "shallow",
[PM_SUSPEND_MEM] = "deep",
};
const char *mem_sleep_states[PM_SUSPEND_MAX];
suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
suspend_state_t mem_sleep_current = PM_SUSPEND_TO_IDLE;
suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
suspend_state_t pm_suspend_target_state;
EXPORT_SYMBOL_GPL(pm_suspend_target_state);
unsigned int pm_suspend_global_flags;
EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
static const struct platform_s2idle_ops *s2idle_ops;
static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head);
enum freeze_state __read_mostly suspend_freeze_state;
static DEFINE_SPINLOCK(suspend_freeze_lock);
enum s2idle_states __read_mostly s2idle_state;
static DEFINE_SPINLOCK(s2idle_lock);
void freeze_set_ops(const struct platform_freeze_ops *ops)
void s2idle_set_ops(const struct platform_s2idle_ops *ops)
{
lock_system_sleep();
freeze_ops = ops;
s2idle_ops = ops;
unlock_system_sleep();
}
static void freeze_begin(void)
static void s2idle_begin(void)
{
suspend_freeze_state = FREEZE_STATE_NONE;
s2idle_state = S2IDLE_STATE_NONE;
}
static void freeze_enter(void)
static void s2idle_enter(void)
{
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);
spin_lock_irq(&suspend_freeze_lock);
spin_lock_irq(&s2idle_lock);
if (pm_wakeup_pending())
goto out;
suspend_freeze_state = FREEZE_STATE_ENTER;
spin_unlock_irq(&suspend_freeze_lock);
s2idle_state = S2IDLE_STATE_ENTER;
spin_unlock_irq(&s2idle_lock);
get_online_cpus();
cpuidle_resume();
@@ -87,56 +91,75 @@ static void freeze_enter(void)
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
/* Make the current CPU wait so it can enter the idle loop too. */
wait_event(suspend_freeze_wait_head,
suspend_freeze_state == FREEZE_STATE_WAKE);
wait_event(s2idle_wait_head,
s2idle_state == S2IDLE_STATE_WAKE);
cpuidle_pause();
put_online_cpus();
spin_lock_irq(&suspend_freeze_lock);
spin_lock_irq(&s2idle_lock);
out:
suspend_freeze_state = FREEZE_STATE_NONE;
spin_unlock_irq(&suspend_freeze_lock);
s2idle_state = S2IDLE_STATE_NONE;
spin_unlock_irq(&s2idle_lock);
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
}
static void s2idle_loop(void)
{
pr_debug("PM: suspend-to-idle\n");
pm_pr_dbg("suspend-to-idle\n");
do {
freeze_enter();
for (;;) {
int error;
if (freeze_ops && freeze_ops->wake)
freeze_ops->wake();
dpm_noirq_begin();
dpm_resume_noirq(PMSG_RESUME);
if (freeze_ops && freeze_ops->sync)
freeze_ops->sync();
/*
* Suspend-to-idle equals
* frozen processes + suspended devices + idle processors.
* Thus s2idle_enter() should be called right after
* all devices have been suspended.
*/
error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
if (!error)
s2idle_enter();
dpm_noirq_resume_devices(PMSG_RESUME);
if (error && (error != -EBUSY || !pm_wakeup_pending())) {
dpm_noirq_end();
break;
}
if (s2idle_ops && s2idle_ops->wake)
s2idle_ops->wake();
dpm_noirq_end();
if (s2idle_ops && s2idle_ops->sync)
s2idle_ops->sync();
if (pm_wakeup_pending())
break;
pm_wakeup_clear(false);
} while (!dpm_suspend_noirq(PMSG_SUSPEND));
}
pr_debug("PM: resume from suspend-to-idle\n");
pm_pr_dbg("resume from suspend-to-idle\n");
}
void freeze_wake(void)
void s2idle_wake(void)
{
unsigned long flags;
spin_lock_irqsave(&suspend_freeze_lock, flags);
if (suspend_freeze_state > FREEZE_STATE_NONE) {
suspend_freeze_state = FREEZE_STATE_WAKE;
wake_up(&suspend_freeze_wait_head);
spin_lock_irqsave(&s2idle_lock, flags);
if (s2idle_state > S2IDLE_STATE_NONE) {
s2idle_state = S2IDLE_STATE_WAKE;
wake_up(&s2idle_wait_head);
}
spin_unlock_irqrestore(&suspend_freeze_lock, flags);
spin_unlock_irqrestore(&s2idle_lock, flags);
}
EXPORT_SYMBOL_GPL(freeze_wake);
EXPORT_SYMBOL_GPL(s2idle_wake);
static bool valid_state(suspend_state_t state)
{
@@ -152,19 +175,19 @@ void __init pm_states_init(void)
{
/* "mem" and "freeze" are always present in /sys/power/state. */
pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM];
pm_states[PM_SUSPEND_FREEZE] = pm_labels[PM_SUSPEND_FREEZE];
pm_states[PM_SUSPEND_TO_IDLE] = pm_labels[PM_SUSPEND_TO_IDLE];
/*
* Suspend-to-idle should be supported even without any suspend_ops,
* initialize mem_sleep_states[] accordingly here.
*/
mem_sleep_states[PM_SUSPEND_FREEZE] = mem_sleep_labels[PM_SUSPEND_FREEZE];
mem_sleep_states[PM_SUSPEND_TO_IDLE] = mem_sleep_labels[PM_SUSPEND_TO_IDLE];
}
static int __init mem_sleep_default_setup(char *str)
{
suspend_state_t state;
for (state = PM_SUSPEND_FREEZE; state <= PM_SUSPEND_MEM; state++)
for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++)
if (mem_sleep_labels[state] &&
!strcmp(str, mem_sleep_labels[state])) {
mem_sleep_default = state;
@@ -193,7 +216,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
}
if (valid_state(PM_SUSPEND_MEM)) {
mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
if (mem_sleep_default == PM_SUSPEND_MEM)
if (mem_sleep_default >= PM_SUSPEND_MEM)
mem_sleep_current = PM_SUSPEND_MEM;
}
@@ -216,49 +239,49 @@ EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static bool sleep_state_supported(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
return state == PM_SUSPEND_TO_IDLE || (suspend_ops && suspend_ops->enter);
}
static int platform_suspend_prepare(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare ?
suspend_ops->prepare() : 0;
}
static int platform_suspend_prepare_late(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
freeze_ops->prepare() : 0;
return state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->prepare ?
s2idle_ops->prepare() : 0;
}
static int platform_suspend_prepare_noirq(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare_late ?
suspend_ops->prepare_late() : 0;
}
static void platform_resume_noirq(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
if (state != PM_SUSPEND_TO_IDLE && suspend_ops->wake)
suspend_ops->wake();
}
static void platform_resume_early(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
freeze_ops->restore();
if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->restore)
s2idle_ops->restore();
}
static void platform_resume_finish(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
if (state != PM_SUSPEND_TO_IDLE && suspend_ops->finish)
suspend_ops->finish();
}
static int platform_suspend_begin(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
return freeze_ops->begin();
if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->begin)
return s2idle_ops->begin();
else if (suspend_ops && suspend_ops->begin)
return suspend_ops->begin(state);
else
@@ -267,21 +290,21 @@ static int platform_suspend_begin(suspend_state_t state)
static void platform_resume_end(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
freeze_ops->end();
if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->end)
s2idle_ops->end();
else if (suspend_ops && suspend_ops->end)
suspend_ops->end();
}
static void platform_recover(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
if (state != PM_SUSPEND_TO_IDLE && suspend_ops->recover)
suspend_ops->recover();
}
static bool platform_suspend_again(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
return state != PM_SUSPEND_TO_IDLE && suspend_ops->suspend_again ?
suspend_ops->suspend_again() : false;
}
@@ -370,16 +393,21 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
error = dpm_suspend_late(PMSG_SUSPEND);
if (error) {
pr_err("PM: late suspend of devices failed\n");
pr_err("late suspend of devices failed\n");
goto Platform_finish;
}
error = platform_suspend_prepare_late(state);
if (error)
goto Devices_early_resume;
if (state == PM_SUSPEND_TO_IDLE && pm_test_level != TEST_PLATFORM) {
s2idle_loop();
goto Platform_early_resume;
}
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
pr_err("PM: noirq suspend of devices failed\n");
pr_err("noirq suspend of devices failed\n");
goto Platform_early_resume;
}
error = platform_suspend_prepare_noirq(state);
@@ -389,17 +417,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
/*
* PM_SUSPEND_FREEZE equals
* frozen processes + suspended devices + idle processors.
* Thus we should invoke freeze_enter() soon after
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
s2idle_loop();
goto Platform_early_resume;
}
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
@@ -456,6 +473,8 @@ int suspend_devices_and_enter(suspend_state_t state)
if (!sleep_state_supported(state))
return -ENOSYS;
pm_suspend_target_state = state;
error = platform_suspend_begin(state);
if (error)
goto Close;
@@ -464,7 +483,7 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
pr_err("Some devices failed to suspend, or early wake event detected\n");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
@@ -485,6 +504,7 @@ int suspend_devices_and_enter(suspend_state_t state)
Close:
platform_resume_end(state);
pm_suspend_target_state = PM_SUSPEND_ON;
return error;
Recover_platform:
@@ -518,10 +538,10 @@ static int enter_state(suspend_state_t state)
int error;
trace_suspend_resume(TPS("suspend_enter"), state, true);
if (state == PM_SUSPEND_FREEZE) {
if (state == PM_SUSPEND_TO_IDLE) {
#ifdef CONFIG_PM_DEBUG
if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
pr_warn("PM: Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n");
pr_warn("Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n");
return -EAGAIN;
}
#endif
@@ -531,18 +551,18 @@ static int enter_state(suspend_state_t state)
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
if (state == PM_SUSPEND_FREEZE)
freeze_begin();
if (state == PM_SUSPEND_TO_IDLE)
s2idle_begin();
#ifndef CONFIG_SUSPEND_SKIP_SYNC
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
pr_info("PM: Syncing filesystems ... ");
pr_info("Syncing filesystems ... ");
sys_sync();
pr_cont("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif
pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
pm_suspend_clear_flags();
error = suspend_prepare(state);
if (error)
@@ -552,13 +572,13 @@ static int enter_state(suspend_state_t state)
goto Finish;
trace_suspend_resume(TPS("suspend_enter"), state, false);
pr_debug("PM: Suspending system (%s)\n", pm_states[state]);
pm_pr_dbg("Suspending system (%s)\n", mem_sleep_labels[state]);
pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
pm_restore_gfp_mask();
Finish:
pr_debug("PM: Finishing wakeup.\n");
pm_pr_dbg("Finishing wakeup.\n");
suspend_finish();
Unlock:
mutex_unlock(&pm_mutex);
@@ -579,6 +599,7 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
if (error) {
suspend_stats.fail++;
@@ -586,6 +607,7 @@ int pm_suspend(suspend_state_t state)
} else {
suspend_stats.success++;
}
pr_info("suspend exit\n");
return error;
}
EXPORT_SYMBOL(pm_suspend);
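For context, pm_suspend() is normally reached from the /sys/power/state write handler; a minimal sketch (not the actual handler, and assuming the PM_SUSPEND_MIN/PM_SUSPEND_MAX enum bounds from include/linux/suspend.h) of how a sleep label is decoded and handed over:

/* Sketch only: decode a label such as "mem" or "freeze" and suspend. */
static int example_sleep(const char *label)
{
	suspend_state_t state;

	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++)
		if (pm_states[state] && !strcmp(label, pm_states[state]))
			return pm_suspend(state);

	return -EINVAL;
}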

View file

@@ -104,9 +104,9 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
printk(info_test, pm_states[state]);
status = pm_suspend(state);
if (status < 0)
state = PM_SUSPEND_FREEZE;
state = PM_SUSPEND_TO_IDLE;
}
if (state == PM_SUSPEND_FREEZE) {
if (state == PM_SUSPEND_TO_IDLE) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
}

View file

@@ -158,7 +158,7 @@ static void cpuidle_idle_call(void)
}
/*
* Suspend-to-idle ("freeze") is a system state in which all user space
* Suspend-to-idle ("s2idle") is a system state in which all user space
* has been frozen, all I/O devices have been suspended and the only
* activity happens here and in interrupts (if any). In that case bypass
* the cpuidle governor and go straight for the deepest idle state
@@ -167,9 +167,9 @@ static void cpuidle_idle_call(void)
* until a proper wakeup interrupt happens.
*/
if (idle_should_freeze() || dev->use_deepest_state) {
if (idle_should_freeze()) {
entered_state = cpuidle_enter_freeze(drv, dev);
if (idle_should_enter_s2idle() || dev->use_deepest_state) {
if (idle_should_enter_s2idle()) {
entered_state = cpuidle_enter_s2idle(drv, dev);
if (entered_state > 0) {
local_irq_enable();
goto exit_idle;

View file

@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/time.h>
#include "timekeeping_internal.h"
@@ -75,7 +76,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t)
int bin = min(fls(t->tv_sec), NUM_BINS-1);
sleep_time_bin[bin]++;
printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n",
(s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
pm_deferred_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n",
(s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
}