6e5fdeedca

These files were getting <linux/module.h> via an implicit non-obvious path,
but we want to crush those out of existence since they cost time during
compiles of processing thousands of lines of headers for no reason.  Give
them the lightweight header that just contains the EXPORT_SYMBOL
infrastructure.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
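
For illustration, a file that only uses the EXPORT_SYMBOL machinery (a
hypothetical example, not one of the files touched by this commit) can get by
with the lightweight header alone, since <linux/export.h> is what actually
provides EXPORT_SYMBOL/EXPORT_SYMBOL_GPL:

        /* example_helper.c: needs export.h, not the whole of module.h */
        #include <linux/export.h>

        int example_helper(void)
        {
                return 0;
        }
        EXPORT_SYMBOL_GPL(example_helper);
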
/*
 * kernel/power/main.c - PM subsystem core functionality.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 */

#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "power.h"

DEFINE_MUTEX(pm_mutex);

#ifdef CONFIG_PM_SLEEP

/* Routines for PM-transition notifications */

static BLOCKING_NOTIFIER_HEAD(pm_chain_head);

int register_pm_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

int unregister_pm_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

int pm_notifier_call_chain(unsigned long val)
{
        int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

        return notifier_to_errno(ret);
}

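/*
 * A minimal sketch (hypothetical, not part of this file) of how a driver
 * might use the notifier interface above.  The callback and notifier_block
 * names are made up; the PM_* events and NOTIFY_* return values come from
 * <linux/suspend.h> and <linux/notifier.h>:
 *
 *      static int my_pm_callback(struct notifier_block *nb,
 *                                unsigned long event, void *unused)
 *      {
 *              switch (event) {
 *              case PM_SUSPEND_PREPARE:
 *                      pr_debug("my_driver: getting ready to suspend\n");
 *                      return NOTIFY_OK;
 *              case PM_POST_SUSPEND:
 *                      pr_debug("my_driver: back in the working state\n");
 *                      return NOTIFY_OK;
 *              default:
 *                      return NOTIFY_DONE;
 *              }
 *      }
 *
 *      static struct notifier_block my_pm_nb = {
 *              .notifier_call = my_pm_callback,
 *      };
 *
 * followed by register_pm_notifier(&my_pm_nb) at initialization time and
 * unregister_pm_notifier(&my_pm_nb) on teardown.
 */
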
/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;

static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
                             char *buf)
{
        return sprintf(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
                              const char *buf, size_t n)
{
        unsigned long val;

        if (strict_strtoul(buf, 10, &val))
                return -EINVAL;

        if (val > 1)
                return -EINVAL;

        pm_async_enabled = val;
        return n;
}

power_attr(pm_async);

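/*
 * Example of the resulting sysfs knob (user-space interaction, not part of
 * this file); asynchronous suspend/resume of devices is on by default:
 *
 *      # cat /sys/power/pm_async
 *      1
 *      # echo 0 > /sys/power/pm_async          (force fully synchronous PM)
 */
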
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;

static const char * const pm_tests[__TEST_AFTER_LAST] = {
        [TEST_NONE] = "none",
        [TEST_CORE] = "core",
        [TEST_CPUS] = "processors",
        [TEST_PLATFORM] = "platform",
        [TEST_DEVICES] = "devices",
        [TEST_FREEZER] = "freezer",
};

static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
                            char *buf)
{
        char *s = buf;
        int level;

        for (level = TEST_FIRST; level <= TEST_MAX; level++)
                if (pm_tests[level]) {
                        if (level == pm_test_level)
                                s += sprintf(s, "[%s] ", pm_tests[level]);
                        else
                                s += sprintf(s, "%s ", pm_tests[level]);
                }

        if (s != buf)
                /* convert the last space to a newline */
                *(s-1) = '\n';

        return (s - buf);
}

static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t n)
{
        const char * const *s;
        int level;
        char *p;
        int len;
        int error = -EINVAL;

        p = memchr(buf, '\n', n);
        len = p ? p - buf : n;

        mutex_lock(&pm_mutex);

        level = TEST_FIRST;
        for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
                if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
                        pm_test_level = level;
                        error = 0;
                        break;
                }

        mutex_unlock(&pm_mutex);

        return error ? error : n;
}

power_attr(pm_test);
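
/*
 * Example of the pm_test interface in action (user-space interaction, not
 * part of this file); reading shows the available test levels with the
 * current one in brackets, and writing one of the names selects it:
 *
 *      # cat /sys/power/pm_test
 *      [none] core processors platform devices freezer
 *      # echo devices > /sys/power/pm_test
 */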
#endif /* CONFIG_PM_DEBUG */

#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
{
        switch (step) {
        case SUSPEND_FREEZE:
                return "freeze";
        case SUSPEND_PREPARE:
                return "prepare";
        case SUSPEND_SUSPEND:
                return "suspend";
        case SUSPEND_SUSPEND_NOIRQ:
                return "suspend_noirq";
        case SUSPEND_RESUME_NOIRQ:
                return "resume_noirq";
        case SUSPEND_RESUME:
                return "resume";
        default:
                return "";
        }
}

static int suspend_stats_show(struct seq_file *s, void *unused)
{
        int i, index, last_dev, last_errno, last_step;

        last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
        last_dev %= REC_FAILED_NUM;
        last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
        last_errno %= REC_FAILED_NUM;
        last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
        last_step %= REC_FAILED_NUM;
        seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
                        "%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
                        "success", suspend_stats.success,
                        "fail", suspend_stats.fail,
                        "failed_freeze", suspend_stats.failed_freeze,
                        "failed_prepare", suspend_stats.failed_prepare,
                        "failed_suspend", suspend_stats.failed_suspend,
                        "failed_suspend_noirq",
                        suspend_stats.failed_suspend_noirq,
                        "failed_resume", suspend_stats.failed_resume,
                        "failed_resume_noirq",
                        suspend_stats.failed_resume_noirq);
        seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
                        suspend_stats.failed_devs[last_dev]);
        for (i = 1; i < REC_FAILED_NUM; i++) {
                index = last_dev + REC_FAILED_NUM - i;
                index %= REC_FAILED_NUM;
                seq_printf(s, "\t\t\t%-s\n",
                        suspend_stats.failed_devs[index]);
        }
        seq_printf(s, " last_failed_errno:\t%-d\n",
                        suspend_stats.errno[last_errno]);
        for (i = 1; i < REC_FAILED_NUM; i++) {
                index = last_errno + REC_FAILED_NUM - i;
                index %= REC_FAILED_NUM;
                seq_printf(s, "\t\t\t%-d\n",
                        suspend_stats.errno[index]);
        }
        seq_printf(s, " last_failed_step:\t%-s\n",
                        suspend_step_name(
                                suspend_stats.failed_steps[last_step]));
        for (i = 1; i < REC_FAILED_NUM; i++) {
                index = last_step + REC_FAILED_NUM - i;
                index %= REC_FAILED_NUM;
                seq_printf(s, "\t\t\t%-s\n",
                        suspend_step_name(
                                suspend_stats.failed_steps[index]));
        }

        return 0;
}

static int suspend_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, suspend_stats_show, NULL);
}

static const struct file_operations suspend_stats_operations = {
        .open = suspend_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init pm_debugfs_init(void)
{
        debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
                        NULL, NULL, &suspend_stats_operations);
        return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#endif /* CONFIG_PM_SLEEP */

struct kobject *power_kobj;

/**
 * state - control system power state.
 *
 * show() returns what states are supported, which is hard-coded to
 * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
 * 'disk' (Suspend-to-Disk).
 *
 * store() accepts one of those strings, translates it into the
 * proper enumerated value, and initiates a suspend transition.
 */
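/*
 * Example (user-space interaction, not part of this file; the states shown
 * depend on CONFIG_SUSPEND/CONFIG_HIBERNATION and platform support):
 *
 *      # cat /sys/power/state
 *      standby mem disk
 *      # echo mem > /sys/power/state           (suspend to RAM)
 *      # echo disk > /sys/power/state          (hibernate)
 */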
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
                          char *buf)
{
        char *s = buf;
#ifdef CONFIG_SUSPEND
        int i;

        for (i = 0; i < PM_SUSPEND_MAX; i++) {
                if (pm_states[i] && valid_state(i))
                        s += sprintf(s,"%s ", pm_states[i]);
        }
#endif
#ifdef CONFIG_HIBERNATION
        s += sprintf(s, "%s\n", "disk");
#else
        if (s != buf)
                /* convert the last space to a newline */
                *(s-1) = '\n';
#endif
        return (s - buf);
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
        suspend_state_t state = PM_SUSPEND_STANDBY;
        const char * const *s;
#endif
        char *p;
        int len;
        int error = -EINVAL;

        p = memchr(buf, '\n', n);
        len = p ? p - buf : n;

        /* First, check if we are requested to hibernate */
        if (len == 4 && !strncmp(buf, "disk", len)) {
                error = hibernate();
                goto Exit;
        }

#ifdef CONFIG_SUSPEND
        for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
                if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
                        break;
        }
        if (state < PM_SUSPEND_MAX && *s)
                error = enter_state(state);
        if (error) {
                suspend_stats.fail++;
                dpm_save_failed_errno(error);
        } else
                suspend_stats.success++;
#endif

 Exit:
        return error ? error : n;
}

power_attr(state);

#ifdef CONFIG_PM_SLEEP
/*
 * The 'wakeup_count' attribute, along with the functions defined in
 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
 * handled in a non-racy way.
 *
 * If a wakeup event occurs when the system is in a sleep state, it simply is
 * woken up.  In turn, if an event that would wake the system up from a sleep
 * state occurs when it is undergoing a transition to that sleep state, the
 * transition should be aborted.  Moreover, if such an event occurs when the
 * system is in the working state, an attempt to start a transition to the
 * given sleep state should fail for a certain period after the detection of
 * the event.  Using the 'state' attribute alone is not sufficient to satisfy
 * these requirements, because a wakeup event may occur exactly when 'state'
 * is being written to and may be delivered to user space right before it is
 * frozen, so the event will remain only partially processed until the system
 * is woken up by another event.  In particular, it won't cause the transition
 * to a sleep state to be aborted.
 *
 * This difficulty may be overcome if user space uses 'wakeup_count' before
 * writing to 'state'.  It should first read from 'wakeup_count' and store
 * the read value.  Then, after carrying out its own preparations for the
 * system transition to a sleep state, it should write the stored value to
 * 'wakeup_count'.  If that fails, at least one wakeup event has occurred since
 * 'wakeup_count' was read and 'state' should not be written to.  Otherwise, it
 * is allowed to write to 'state', but the transition will be aborted if there
 * are any wakeup events detected after 'wakeup_count' was written to.
 */

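/*
 * A sketch of that sequence as seen from user space (illustrative only, not
 * part of this file):
 *
 *      # count=$(cat /sys/power/wakeup_count)
 *        ... carry out suspend preparations ...
 *      # echo "$count" > /sys/power/wakeup_count    (fails if events occurred)
 *      # echo mem > /sys/power/state                (only if the write succeeded)
 */
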
static ssize_t wakeup_count_show(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 char *buf)
{
        unsigned int val;

        return pm_get_wakeup_count(&val) ? sprintf(buf, "%u\n", val) : -EINTR;
}

static ssize_t wakeup_count_store(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  const char *buf, size_t n)
{
        unsigned int val;

        if (sscanf(buf, "%u", &val) == 1) {
                if (pm_save_wakeup_count(val))
                        return n;
        }
        return -EINVAL;
}

power_attr(wakeup_count);
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;

static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
                             char *buf)
{
        return sprintf(buf, "%d\n", pm_trace_enabled);
}

static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
               const char *buf, size_t n)
{
        int val;

        if (sscanf(buf, "%d", &val) == 1) {
                pm_trace_enabled = !!val;
                return n;
        }
        return -EINVAL;
}

power_attr(pm_trace);

static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
                                       struct kobj_attribute *attr,
                                       char *buf)
{
        return show_trace_dev_match(buf, PAGE_SIZE);
}

static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t n)
{
        return -EINVAL;
}

power_attr(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

static struct attribute * g[] = {
        &state_attr.attr,
#ifdef CONFIG_PM_TRACE
        &pm_trace_attr.attr,
        &pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
        &pm_async_attr.attr,
        &wakeup_count_attr.attr,
#ifdef CONFIG_PM_DEBUG
        &pm_test_attr.attr,
#endif
#endif
        NULL,
};

static struct attribute_group attr_group = {
        .attrs = g,
};

#ifdef CONFIG_PM_RUNTIME
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);

static int __init pm_start_workqueue(void)
{
        pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

        return pm_wq ? 0 : -ENOMEM;
}
#else
static inline int pm_start_workqueue(void) { return 0; }
#endif

static int __init pm_init(void)
{
        int error = pm_start_workqueue();
        if (error)
                return error;
        hibernate_image_size_init();
        hibernate_reserved_size_init();
        power_kobj = kobject_create_and_add("power", NULL);
        if (!power_kobj)
                return -ENOMEM;
        return sysfs_create_group(power_kobj, &attr_group);
}

core_initcall(pm_init);