Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  PM / Hibernate: Implement compat_ioctl for /dev/snapshot
  PM / Freezer: fix return value of freezable_schedule_timeout_killable()
  PM / shmobile: Allow the A4R domain to be turned off at run time
  PM / input / touchscreen: Make st1232 use device PM QoS constraints
  PM / QoS: Introduce dev_pm_qos_add_ancestor_request()
  PM / shmobile: Remove the stay_on flag from SH7372's PM domains
  PM / shmobile: Don't include SH7372's INTCS in syscore suspend/resume
  PM / shmobile: Add support for the sh7372 A4S power domain / sleep mode
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM/Devfreq: Add Exynos4-bus device DVFS driver for Exynos4210/4212/4412.
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  ARM: S3C64XX: Implement basic power domain support
  PM / shmobile: Use common always on power domain governor
  ...

Fix up trivial conflict in fs/xfs/xfs_buf.c due to removal of unused XBT_FORCE_SLEEP bit
commit eb59c505f8
107 changed files with 3252 additions and 1533 deletions
@ -85,17 +85,6 @@ Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>

---------------------------

What: Deprecated snapshot ioctls
When: 2.6.36

Why: The ioctls in kernel/power/user.c were marked as deprecated long time
     ago. Now they notify users about that so that they need to replace
     their userspace. After some more time, remove them completely.

Who: Jiri Slaby <jirislaby@gmail.com>

---------------------------

What: The ieee80211_regdom module parameter
When: March 2010 / desktop catchup
@ -126,7 +126,9 @@ The core methods to suspend and resume devices reside in struct dev_pm_ops
pointed to by the ops member of struct dev_pm_domain, or by the pm member of
struct bus_type, struct device_type and struct class. They are mostly of
interest to the people writing infrastructure for platforms and buses, like PCI
or USB, or device type and device class drivers.
or USB, or device type and device class drivers. They also are relevant to the
writers of device drivers whose subsystems (PM domains, device types, device
classes and bus types) don't provide all power management methods.

Bus drivers implement these methods as appropriate for the hardware and the
drivers using it; PCI works differently from USB, and so on. Not many people
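The sentence added above is aimed at driver writers whose subsystem does not provide every power management method. As a hedged illustration (not part of this commit, and using a hypothetical platform driver named "foo"), such a driver can publish its own struct dev_pm_ops through its driver structure:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical system sleep callbacks implemented by the driver itself. */
static int foo_suspend(struct device *dev)
{
	/* Quiesce the hardware before the system enters a sleep state. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Bring the hardware back to a fully operational state. */
	return 0;
}

/* Expands to a struct dev_pm_ops with the system sleep callbacks filled in. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,	/* consulted when the subsystem has no callback */
	},
};

module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");

With the changes in this merge, the PM core falls back to such driver callbacks directly whenever the bus type, class or device type does not supply the corresponding method.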
@ -268,32 +270,35 @@ various phases always run after tasks have been frozen and before they are
unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
been disabled (except for those marked with the IRQF_NO_SUSPEND flag).

All phases use PM domain, bus, type, or class callbacks (that is, methods
defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
These callbacks are regarded by the PM core as mutually exclusive. Moreover,
PM domain callbacks always take precedence over bus, type and class callbacks,
while type callbacks take precedence over bus and class callbacks, and class
callbacks take precedence over bus callbacks. To be precise, the following
rules are used to determine which callback to execute in the given phase:
All phases use PM domain, bus, type, class or driver callbacks (that is, methods
defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
dev->driver->pm). These callbacks are regarded by the PM core as mutually
exclusive. Moreover, PM domain callbacks always take precedence over all of the
other callbacks and, for example, type callbacks take precedence over bus, class
and driver callbacks. To be precise, the following rules are used to determine
which callback to execute in the given phase:

1. If dev->pm_domain is present, the PM core will attempt to execute the
   callback included in dev->pm_domain->ops. If that callback is not
   present, no action will be carried out for the given device.
1. If dev->pm_domain is present, the PM core will choose the callback
   included in dev->pm_domain->ops for execution

2. Otherwise, if both dev->type and dev->type->pm are present, the callback
   included in dev->type->pm will be executed.
   included in dev->type->pm will be chosen for execution.

3. Otherwise, if both dev->class and dev->class->pm are present, the
   callback included in dev->class->pm will be executed.
   callback included in dev->class->pm will be chosen for execution.

4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
   included in dev->bus->pm will be executed.
   included in dev->bus->pm will be chosen for execution.

This allows PM domains and device types to override callbacks provided by bus
types or device classes if necessary.

These callbacks may in turn invoke device- or driver-specific methods stored in
dev->driver->pm, but they don't have to.
The PM domain, type, class and bus callbacks may in turn invoke device- or
driver-specific methods stored in dev->driver->pm, but they don't have to do
that.

If the subsystem callback chosen for execution is not present, the PM core will
execute the corresponding method from dev->driver->pm instead if there is one.
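The precedence rules just quoted reduce to a simple selection order. The following sketch only illustrates that order under the new text; it is not the kernel's actual pm_op() code, and the helper names are invented for this example:

#include <linux/device.h>
#include <linux/pm.h>

/*
 * Illustrative only: return the set of callbacks that the rules above
 * would consult first.  A PM domain wins, then the device type, then the
 * class, then the bus type.
 */
static const struct dev_pm_ops *pick_subsystem_pm_ops(struct device *dev)
{
	if (dev->pm_domain)
		return &dev->pm_domain->ops;
	if (dev->type && dev->type->pm)
		return dev->type->pm;
	if (dev->class && dev->class->pm)
		return dev->class->pm;
	if (dev->bus && dev->bus->pm)
		return dev->bus->pm;
	return NULL;
}

static int run_suspend_callback(struct device *dev)
{
	const struct dev_pm_ops *ops = pick_subsystem_pm_ops(dev);

	if (ops && ops->suspend)
		return ops->suspend(dev);

	/* No subsystem callback: run the driver callback directly, if any. */
	if (dev->driver && dev->driver->pm && dev->driver->pm->suspend)
		return dev->driver->pm->suspend(dev);

	return 0;
}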
Entering System Suspend
@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
either wakes them up, if they are kernel threads, or sends fake signals to them,
if they are user space processes. A task that has TIF_FREEZE set, should react
to it by calling the function called refrigerator() (defined in
to it by calling the function called __refrigerator() (defined in
kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
Then, we say that the task is 'frozen' and therefore the set of functions

@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
User space processes are generally frozen before kernel threads.

It is not recommended to call refrigerator() directly. Instead, it is
recommended to use the try_to_freeze() function (defined in
include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
task enter refrigerator() if the flag is set.
__refrigerator() must not be called directly. Instead, use the
try_to_freeze() function (defined in include/linux/freezer.h), that checks
the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
flag is set.

For user space processes try_to_freeze() is called automatically from the
signal-handling code, but the freezable kernel threads need to call it
@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
After the system memory state has been restored from a hibernation image and
devices have been reinitialized, the function thaw_processes() is called in
order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
have been frozen leave refrigerator() and continue running.
have been frozen leave __refrigerator() and continue running.

III. Which kernel threads are freezable?

Kernel threads are not freezable by default. However, a kernel thread may clear
PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
directly is strongly discouraged). From this point it is regarded as freezable
directly is not allowed). From this point it is regarded as freezable
and must call try_to_freeze() in a suitable place.
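Putting the two rules quoted above together (call set_freezable() to become freezable, then call try_to_freeze() at a safe point rather than entering __refrigerator() directly), a freezable kernel thread ends up looking roughly like the hypothetical sketch below; the thread name and the per-iteration work are placeholders, not part of this commit:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

static int frozen_worker(void *unused)
{
	/* Clear PF_NOFREEZE so that the freezer may freeze this thread. */
	set_freezable();

	while (!kthread_should_stop()) {
		/*
		 * If TIF_FREEZE has been set for us, this enters
		 * __refrigerator() on our behalf and returns only
		 * after the tasks have been thawed again.
		 */
		try_to_freeze();

		/* ... do one unit of work here ... */
		msleep_interruptible(1000);
	}

	return 0;
}

/* Typically started with: kthread_run(frozen_worker, NULL, "frozen_worker"); */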
IV. Why do we do that?
@ -176,3 +176,28 @@ tasks, since it generally exists anyway.
A driver must have all firmwares it may need in RAM before suspend() is called.
If keeping them is not practical, for example due to their size, they must be
requested early enough using the suspend notifier API described in notifiers.txt.

VI. Are there any precautions to be taken to prevent freezing failures?

Yes, there are.

First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
from system-wide sleep such as suspend/hibernation is not encouraged.
If possible, that piece of code must instead hook onto the suspend/hibernation
notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
(kernel/cpu.c) for an example.

However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
that could lead to freezing failures, because if the suspend/hibernate code
successfully acquired the 'pm_mutex' lock, and hence that other entity failed
to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
state. As a consequence, the freezer would not be able to freeze that task,
leading to freezing failure.

However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
since they ask the freezer to skip freezing this task, since it is anyway
"frozen enough" as it is blocked on 'pm_mutex', which will be released
only after the entire suspend/hibernation sequence is complete.
So, to summarize, use [un]lock_system_sleep() instead of directly using
mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
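In code, the recommendation above amounts to the following minimal sketch; the function being protected is hypothetical and only shows where the wrappers go:

#include <linux/suspend.h>

/*
 * Hypothetical example of a code path that must not run concurrently with
 * system suspend or hibernation.  lock_system_sleep() also tells the
 * freezer that this task is "frozen enough", so blocking on 'pm_mutex'
 * here can no longer cause a freezing failure.
 */
static void reconfigure_suspend_sensitive_state(void)
{
	lock_system_sleep();

	/* ... touch state that the suspend/hibernation path also uses ... */

	unlock_system_sleep();
}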
@ -57,6 +57,10 @@ the following:

4. Bus type of the device, if both dev->bus and dev->bus->pm are present.

If the subsystem chosen by applying the above rules doesn't provide the relevant
callback, the PM core will invoke the corresponding driver callback stored in
dev->driver->pm directly (if present).

The PM core always checks which callback to use in the order given above, so the
priority order of callbacks from high to low is: PM domain, device type, class
and bus type. Moreover, the high-priority one will always take precedence over
@ -64,86 +68,88 @@ a low-priority one. The PM domain, bus type, device type and class callbacks
are referred to as subsystem-level callbacks in what follows.

By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
->runtime_idle() callbacks may be invoked in atomic context with interrupts
disabled for a given device. This implies that the callback routines in
question must not block or sleep, but it also means that the synchronous helper
functions listed at the end of Section 4 may be used for that device within an
interrupt handler or generally in an atomic context.
enabled. However, the pm_runtime_irq_safe() helper function can be used to tell
the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
and ->runtime_idle() callbacks for the given device in atomic context with
interrupts disabled. This implies that the callback routines in question must
not block or sleep, but it also means that the synchronous helper functions
listed at the end of Section 4 may be used for that device within an interrupt
handler or generally in an atomic context.
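For illustration, a driver that wants its runtime PM callbacks to be usable in atomic context would make the call sketched below, typically at probe time; the driver and device names are hypothetical and not part of this commit:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Tell the PM core that this device's ->runtime_suspend(),
	 * ->runtime_resume() and ->runtime_idle() callbacks may be run with
	 * interrupts disabled.  The callbacks must then never sleep.
	 */
	pm_runtime_irq_safe(&pdev->dev);

	pm_runtime_enable(&pdev->dev);

	return 0;
}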
The subsystem-level suspend callback is _entirely_ _responsible_ for handling
the suspend of the device as appropriate, which may, but need not include
executing the device driver's own ->runtime_suspend() callback (from the
The subsystem-level suspend callback, if present, is _entirely_ _responsible_
for handling the suspend of the device as appropriate, which may, but need not
include executing the device driver's own ->runtime_suspend() callback (from the
PM core's point of view it is not necessary to implement a ->runtime_suspend()
callback in a device driver as long as the subsystem-level suspend callback
knows what to do to handle the device).

* Once the subsystem-level suspend callback has completed successfully
  for given device, the PM core regards the device as suspended, which need
  not mean that the device has been put into a low power state. It is
  supposed to mean, however, that the device will not process data and will
  not communicate with the CPU(s) and RAM until the subsystem-level resume
  callback is executed for it. The runtime PM status of a device after
  successful execution of the subsystem-level suspend callback is 'suspended'.
* Once the subsystem-level suspend callback (or the driver suspend callback,
  if invoked directly) has completed successfully for the given device, the PM
  core regards the device as suspended, which need not mean that it has been
  put into a low power state. It is supposed to mean, however, that the
  device will not process data and will not communicate with the CPU(s) and
  RAM until the appropriate resume callback is executed for it. The runtime
  PM status of a device after successful execution of the suspend callback is
  'suspended'.

* If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
  the device's runtime PM status is 'active', which means that the device
  _must_ be fully operational afterwards.
* If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
  status remains 'active', which means that the device _must_ be fully
  operational afterwards.

* If the subsystem-level suspend callback returns an error code different
  from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
  refuse to run the helper functions described in Section 4 for the device,
  until the status of it is directly set either to 'active', or to 'suspended'
  (the PM core provides special helper functions for this purpose).
* If the suspend callback returns an error code different from -EBUSY and
  -EAGAIN, the PM core regards this as a fatal error and will refuse to run
  the helper functions described in Section 4 for the device until its status
  is directly set to either 'active', or 'suspended' (the PM core provides
  special helper functions for this purpose).
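To make the return-value rules above concrete, here is a hedged sketch of driver-level runtime PM callbacks for a hypothetical device "bar": the suspend callback reports -EBUSY instead of failing fatally when the device turns out to be busy, the resume callback must leave the device fully operational, and the idle callback (discussed further below) simply queues up a suspend request. None of this code is part of this commit.

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct bar_priv {
	bool transfer_in_progress;	/* hypothetical busy indicator */
};

static int bar_runtime_suspend(struct device *dev)
{
	struct bar_priv *priv = dev_get_drvdata(dev);

	/* Not idle after all: stay in the 'active' state instead of failing. */
	if (priv->transfer_in_progress)
		return -EBUSY;

	/* ... put the hardware into a low-power state ... */
	return 0;	/* the PM core now regards the device as 'suspended' */
}

static int bar_runtime_resume(struct device *dev)
{
	/* ... power the hardware up again; it must be fully operational ... */
	return 0;	/* the PM core now regards the device as 'active' */
}

static int bar_runtime_idle(struct device *dev)
{
	/* Both counters are zero: queue up a suspend request for the device. */
	pm_schedule_suspend(dev, 100);	/* delay in milliseconds */
	return 0;	/* this return value is ignored by the PM core */
}

static const struct dev_pm_ops bar_pm_ops = {
	.runtime_suspend = bar_runtime_suspend,
	.runtime_resume	 = bar_runtime_resume,
	.runtime_idle	 = bar_runtime_idle,
};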
In particular, if the driver requires remote wake-up capability (i.e. hardware
In particular, if the driver requires remote wakeup capability (i.e. hardware
mechanism allowing the device to request a change of its power state, such as
PCI PME) for proper functioning and device_run_wake() returns 'false' for the
device, then ->runtime_suspend() should return -EBUSY. On the other hand, if
device_run_wake() returns 'true' for the device and the device is put into a low
power state during the execution of the subsystem-level suspend callback, it is
expected that remote wake-up will be enabled for the device. Generally, remote
wake-up should be enabled for all input devices put into a low power state at
run time.
device_run_wake() returns 'true' for the device and the device is put into a
low-power state during the execution of the suspend callback, it is expected
that remote wakeup will be enabled for the device. Generally, remote wakeup
should be enabled for all input devices put into low-power states at run time.
The subsystem-level resume callback is _entirely_ _responsible_ for handling the
resume of the device as appropriate, which may, but need not include executing
the device driver's own ->runtime_resume() callback (from the PM core's point of
view it is not necessary to implement a ->runtime_resume() callback in a device
driver as long as the subsystem-level resume callback knows what to do to handle
the device).
The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
handling the resume of the device as appropriate, which may, but need not
include executing the device driver's own ->runtime_resume() callback (from the
PM core's point of view it is not necessary to implement a ->runtime_resume()
callback in a device driver as long as the subsystem-level resume callback knows
what to do to handle the device).

* Once the subsystem-level resume callback has completed successfully, the PM
  core regards the device as fully operational, which means that the device
  _must_ be able to complete I/O operations as needed. The runtime PM status
  of the device is then 'active'.
* Once the subsystem-level resume callback (or the driver resume callback, if
  invoked directly) has completed successfully, the PM core regards the device
  as fully operational, which means that the device _must_ be able to complete
  I/O operations as needed. The runtime PM status of the device is then
  'active'.

* If the subsystem-level resume callback returns an error code, the PM core
  regards this as a fatal error and will refuse to run the helper functions
  described in Section 4 for the device, until its status is directly set
  either to 'active' or to 'suspended' (the PM core provides special helper
  functions for this purpose).
* If the resume callback returns an error code, the PM core regards this as a
  fatal error and will refuse to run the helper functions described in Section
  4 for the device, until its status is directly set to either 'active', or
  'suspended' (by means of special helper functions provided by the PM core
  for this purpose).
The subsystem-level idle callback is executed by the PM core whenever the device
|
||||
appears to be idle, which is indicated to the PM core by two counters, the
|
||||
device's usage counter and the counter of 'active' children of the device.
|
||||
The idle callback (a subsystem-level one, if present, or the driver one) is
|
||||
executed by the PM core whenever the device appears to be idle, which is
|
||||
indicated to the PM core by two counters, the device's usage counter and the
|
||||
counter of 'active' children of the device.
|
||||
|
||||
* If any of these counters is decreased using a helper function provided by
|
||||
the PM core and it turns out to be equal to zero, the other counter is
|
||||
checked. If that counter also is equal to zero, the PM core executes the
|
||||
subsystem-level idle callback with the device as an argument.
|
||||
idle callback with the device as its argument.
|
||||
|
||||
The action performed by a subsystem-level idle callback is totally dependent on
|
||||
the subsystem in question, but the expected and recommended action is to check
|
||||
The action performed by the idle callback is totally dependent on the subsystem
|
||||
(or driver) in question, but the expected and recommended action is to check
|
||||
if the device can be suspended (i.e. if all of the conditions necessary for
|
||||
suspending the device are satisfied) and to queue up a suspend request for the
|
||||
device in that case. The value returned by this callback is ignored by the PM
|
||||
core.
|
||||
|
||||
The helper functions provided by the PM core, described in Section 4, guarantee
|
||||
that the following constraints are met with respect to the bus type's runtime
|
||||
PM callbacks:
|
||||
that the following constraints are met with respect to runtime PM callbacks for
|
||||
one device:
|
||||
|
||||
(1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
|
||||
->runtime_suspend() in parallel with ->runtime_resume() or with another
|
||||
|
|
|
@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
|
|||
#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
|
||||
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
|
||||
#define TIF_FREEZE 16 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
|
@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
|
|||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
/* Work to do on interrupt/exception return. */
|
||||
#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
|
||||
|
|
|
@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
|
|||
#define TIF_POLLING_NRFLAG 16
|
||||
#define TIF_USING_IWMMXT 17
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19
|
||||
#define TIF_RESTORE_SIGMASK 20
|
||||
#define TIF_SECCOMP 21
|
||||
|
||||
|
@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
|
|||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ config PLAT_S3C64XX
|
|||
bool
|
||||
depends on ARCH_S3C64XX
|
||||
select SAMSUNG_WAKEMASK
|
||||
select PM_GENERIC_DOMAINS
|
||||
default y
|
||||
help
|
||||
Base platform code for any Samsung S3C64XX device
|
||||
|
|
|
@ -706,7 +706,7 @@ static void __init crag6410_machine_init(void)
|
|||
|
||||
regulator_has_full_constraints();
|
||||
|
||||
s3c_pm_init();
|
||||
s3c64xx_pm_init();
|
||||
}
|
||||
|
||||
MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
|
||||
|
|
|
@ -17,10 +17,12 @@
|
|||
#include <linux/serial_core.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/pm_domain.h>
|
||||
|
||||
#include <mach/map.h>
|
||||
#include <mach/irqs.h>
|
||||
|
||||
#include <plat/devs.h>
|
||||
#include <plat/pm.h>
|
||||
#include <plat/wakeup-mask.h>
|
||||
|
||||
|
@ -31,6 +33,148 @@
|
|||
#include <mach/regs-gpio-memport.h>
|
||||
#include <mach/regs-modem.h>
|
||||
|
||||
struct s3c64xx_pm_domain {
|
||||
char *const name;
|
||||
u32 ena;
|
||||
u32 pwr_stat;
|
||||
struct generic_pm_domain pd;
|
||||
};
|
||||
|
||||
static int s3c64xx_pd_off(struct generic_pm_domain *domain)
|
||||
{
|
||||
struct s3c64xx_pm_domain *pd;
|
||||
u32 val;
|
||||
|
||||
pd = container_of(domain, struct s3c64xx_pm_domain, pd);
|
||||
|
||||
val = __raw_readl(S3C64XX_NORMAL_CFG);
|
||||
val &= ~(pd->ena);
|
||||
__raw_writel(val, S3C64XX_NORMAL_CFG);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int s3c64xx_pd_on(struct generic_pm_domain *domain)
|
||||
{
|
||||
struct s3c64xx_pm_domain *pd;
|
||||
u32 val;
|
||||
long retry = 1000000L;
|
||||
|
||||
pd = container_of(domain, struct s3c64xx_pm_domain, pd);
|
||||
|
||||
val = __raw_readl(S3C64XX_NORMAL_CFG);
|
||||
val |= pd->ena;
|
||||
__raw_writel(val, S3C64XX_NORMAL_CFG);
|
||||
|
||||
/* Not all domains provide power status readback */
|
||||
if (pd->pwr_stat) {
|
||||
do {
|
||||
cpu_relax();
|
||||
if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat)
|
||||
break;
|
||||
} while (retry--);
|
||||
|
||||
if (!retry) {
|
||||
pr_err("Failed to start domain %s\n", pd->name);
|
||||
return -EBUSY;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_irom = {
|
||||
.name = "IROM",
|
||||
.ena = S3C64XX_NORMALCFG_IROM_ON,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_etm = {
|
||||
.name = "ETM",
|
||||
.ena = S3C64XX_NORMALCFG_DOMAIN_ETM_ON,
|
||||
.pwr_stat = S3C64XX_BLKPWRSTAT_ETM,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_s = {
|
||||
.name = "S",
|
||||
.ena = S3C64XX_NORMALCFG_DOMAIN_S_ON,
|
||||
.pwr_stat = S3C64XX_BLKPWRSTAT_S,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_f = {
|
||||
.name = "F",
|
||||
.ena = S3C64XX_NORMALCFG_DOMAIN_F_ON,
|
||||
.pwr_stat = S3C64XX_BLKPWRSTAT_F,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_p = {
|
||||
.name = "P",
|
||||
.ena = S3C64XX_NORMALCFG_DOMAIN_P_ON,
|
||||
.pwr_stat = S3C64XX_BLKPWRSTAT_P,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_i = {
|
||||
.name = "I",
|
||||
.ena = S3C64XX_NORMALCFG_DOMAIN_I_ON,
|
||||
.pwr_stat = S3C64XX_BLKPWRSTAT_I,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_g = {
|
||||
.name = "G",
|
||||
.ena = S3C64XX_NORMALCFG_DOMAIN_G_ON,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain s3c64xx_pm_v = {
|
||||
.name = "V",
|
||||
.ena = S3C64XX_NORMALCFG_DOMAIN_V_ON,
|
||||
.pwr_stat = S3C64XX_BLKPWRSTAT_V,
|
||||
.pd = {
|
||||
.power_off = s3c64xx_pd_off,
|
||||
.power_on = s3c64xx_pd_on,
|
||||
},
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain *s3c64xx_always_on_pm_domains[] = {
|
||||
&s3c64xx_pm_irom,
|
||||
};
|
||||
|
||||
static struct s3c64xx_pm_domain *s3c64xx_pm_domains[] = {
|
||||
&s3c64xx_pm_etm,
|
||||
&s3c64xx_pm_g,
|
||||
&s3c64xx_pm_v,
|
||||
&s3c64xx_pm_i,
|
||||
&s3c64xx_pm_p,
|
||||
&s3c64xx_pm_s,
|
||||
&s3c64xx_pm_f,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
|
||||
void s3c_pm_debug_smdkled(u32 set, u32 clear)
|
||||
{
|
||||
|
@ -89,6 +233,8 @@ static struct sleep_save misc_save[] = {
|
|||
|
||||
SAVE_ITEM(S3C64XX_SDMA_SEL),
|
||||
SAVE_ITEM(S3C64XX_MODEM_MIFPCON),
|
||||
|
||||
SAVE_ITEM(S3C64XX_NORMAL_CFG),
|
||||
};
|
||||
|
||||
void s3c_pm_configure_extint(void)
|
||||
|
@ -179,7 +325,26 @@ static void s3c64xx_pm_prepare(void)
|
|||
__raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
|
||||
}
|
||||
|
||||
static int s3c64xx_pm_init(void)
|
||||
int __init s3c64xx_pm_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
s3c_pm_init();
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s3c64xx_always_on_pm_domains); i++)
|
||||
pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd,
|
||||
&pm_domain_always_on_gov, false);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
|
||||
pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
|
||||
|
||||
if (dev_get_platdata(&s3c_device_fb.dev))
|
||||
pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __init int s3c64xx_pm_initcall(void)
|
||||
{
|
||||
pm_cpu_prep = s3c64xx_pm_prepare;
|
||||
pm_cpu_sleep = s3c64xx_cpu_suspend;
|
||||
|
@ -198,5 +363,12 @@ static int s3c64xx_pm_init(void)
|
|||
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(s3c64xx_pm_initcall);
|
||||
|
||||
arch_initcall(s3c64xx_pm_init);
|
||||
static __init int s3c64xx_pm_late_initcall(void)
|
||||
{
|
||||
pm_genpd_poweroff_unused();
|
||||
|
||||
return 0;
|
||||
}
|
||||
late_initcall(s3c64xx_pm_late_initcall);
|
||||
|
|
|
@ -34,8 +34,8 @@ extern void sh7372_add_standard_devices(void);
|
|||
extern void sh7372_clock_init(void);
|
||||
extern void sh7372_pinmux_init(void);
|
||||
extern void sh7372_pm_init(void);
|
||||
extern void sh7372_resume_core_standby_a3sm(void);
|
||||
extern int sh7372_do_idle_a3sm(unsigned long unused);
|
||||
extern void sh7372_resume_core_standby_sysc(void);
|
||||
extern int sh7372_do_idle_sysc(unsigned long sleep_mode);
|
||||
extern struct clk sh7372_extal1_clk;
|
||||
extern struct clk sh7372_extal2_clk;
|
||||
|
||||
|
|
|
@ -480,11 +480,10 @@ struct platform_device;
|
|||
struct sh7372_pm_domain {
|
||||
struct generic_pm_domain genpd;
|
||||
struct dev_power_governor *gov;
|
||||
void (*suspend)(void);
|
||||
int (*suspend)(void);
|
||||
void (*resume)(void);
|
||||
unsigned int bit_shift;
|
||||
bool no_debug;
|
||||
bool stay_on;
|
||||
};
|
||||
|
||||
static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
|
||||
|
@ -499,6 +498,7 @@ extern struct sh7372_pm_domain sh7372_d4;
|
|||
extern struct sh7372_pm_domain sh7372_a4r;
|
||||
extern struct sh7372_pm_domain sh7372_a3rv;
|
||||
extern struct sh7372_pm_domain sh7372_a3ri;
|
||||
extern struct sh7372_pm_domain sh7372_a4s;
|
||||
extern struct sh7372_pm_domain sh7372_a3sp;
|
||||
extern struct sh7372_pm_domain sh7372_a3sg;
|
||||
|
||||
|
@ -515,5 +515,7 @@ extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
|
|||
|
||||
extern void sh7372_intcs_suspend(void);
|
||||
extern void sh7372_intcs_resume(void);
|
||||
extern void sh7372_intca_suspend(void);
|
||||
extern void sh7372_intca_resume(void);
|
||||
|
||||
#endif /* __ASM_SH7372_H__ */
|
||||
|
|
|
@ -535,6 +535,7 @@ static struct resource intcs_resources[] __initdata = {
|
|||
static struct intc_desc intcs_desc __initdata = {
|
||||
.name = "sh7372-intcs",
|
||||
.force_enable = ENABLED_INTCS,
|
||||
.skip_syscore_suspend = true,
|
||||
.resource = intcs_resources,
|
||||
.num_resources = ARRAY_SIZE(intcs_resources),
|
||||
.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
|
||||
|
@ -611,3 +612,52 @@ void sh7372_intcs_resume(void)
|
|||
for (k = 0x80; k <= 0x9c; k += 4)
|
||||
__raw_writeb(ffd5[k], intcs_ffd5 + k);
|
||||
}
|
||||
|
||||
static unsigned short e694[0x200];
|
||||
static unsigned short e695[0x200];
|
||||
|
||||
void sh7372_intca_suspend(void)
|
||||
{
|
||||
int k;
|
||||
|
||||
for (k = 0x00; k <= 0x38; k += 4)
|
||||
e694[k] = __raw_readw(0xe6940000 + k);
|
||||
|
||||
for (k = 0x80; k <= 0xb4; k += 4)
|
||||
e694[k] = __raw_readb(0xe6940000 + k);
|
||||
|
||||
for (k = 0x180; k <= 0x1b4; k += 4)
|
||||
e694[k] = __raw_readb(0xe6940000 + k);
|
||||
|
||||
for (k = 0x00; k <= 0x50; k += 4)
|
||||
e695[k] = __raw_readw(0xe6950000 + k);
|
||||
|
||||
for (k = 0x80; k <= 0xa8; k += 4)
|
||||
e695[k] = __raw_readb(0xe6950000 + k);
|
||||
|
||||
for (k = 0x180; k <= 0x1a8; k += 4)
|
||||
e695[k] = __raw_readb(0xe6950000 + k);
|
||||
}
|
||||
|
||||
void sh7372_intca_resume(void)
|
||||
{
|
||||
int k;
|
||||
|
||||
for (k = 0x00; k <= 0x38; k += 4)
|
||||
__raw_writew(e694[k], 0xe6940000 + k);
|
||||
|
||||
for (k = 0x80; k <= 0xb4; k += 4)
|
||||
__raw_writeb(e694[k], 0xe6940000 + k);
|
||||
|
||||
for (k = 0x180; k <= 0x1b4; k += 4)
|
||||
__raw_writeb(e694[k], 0xe6940000 + k);
|
||||
|
||||
for (k = 0x00; k <= 0x50; k += 4)
|
||||
__raw_writew(e695[k], 0xe6950000 + k);
|
||||
|
||||
for (k = 0x80; k <= 0xa8; k += 4)
|
||||
__raw_writeb(e695[k], 0xe6950000 + k);
|
||||
|
||||
for (k = 0x180; k <= 0x1a8; k += 4)
|
||||
__raw_writeb(e695[k], 0xe6950000 + k);
|
||||
}
|
||||
|
|
|
@ -82,11 +82,12 @@ static int pd_power_down(struct generic_pm_domain *genpd)
|
|||
struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
|
||||
unsigned int mask = 1 << sh7372_pd->bit_shift;
|
||||
|
||||
if (sh7372_pd->suspend)
|
||||
sh7372_pd->suspend();
|
||||
if (sh7372_pd->suspend) {
|
||||
int ret = sh7372_pd->suspend();
|
||||
|
||||
if (sh7372_pd->stay_on)
|
||||
return 0;
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (__raw_readl(PSTR) & mask) {
|
||||
unsigned int retry_count;
|
||||
|
@ -101,8 +102,8 @@ static int pd_power_down(struct generic_pm_domain *genpd)
|
|||
}
|
||||
|
||||
if (!sh7372_pd->no_debug)
|
||||
pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
|
||||
mask, __raw_readl(PSTR));
|
||||
pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
|
||||
genpd->name, mask, __raw_readl(PSTR));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -113,9 +114,6 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
|
|||
unsigned int retry_count;
|
||||
int ret = 0;
|
||||
|
||||
if (sh7372_pd->stay_on)
|
||||
goto out;
|
||||
|
||||
if (__raw_readl(PSTR) & mask)
|
||||
goto out;
|
||||
|
||||
|
@ -133,8 +131,8 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
|
|||
ret = -EIO;
|
||||
|
||||
if (!sh7372_pd->no_debug)
|
||||
pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
|
||||
mask, __raw_readl(PSTR));
|
||||
pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
|
||||
sh7372_pd->genpd.name, mask, __raw_readl(PSTR));
|
||||
|
||||
out:
|
||||
if (ret == 0 && sh7372_pd->resume && do_resume)
|
||||
|
@ -148,35 +146,60 @@ static int pd_power_up(struct generic_pm_domain *genpd)
|
|||
return __pd_power_up(to_sh7372_pd(genpd), true);
|
||||
}
|
||||
|
||||
static void sh7372_a4r_suspend(void)
|
||||
static int sh7372_a4r_suspend(void)
|
||||
{
|
||||
sh7372_intcs_suspend();
|
||||
__raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool pd_active_wakeup(struct device *dev)
|
||||
{
|
||||
return true;
|
||||
bool (*active_wakeup)(struct device *dev);
|
||||
|
||||
active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
|
||||
return active_wakeup ? active_wakeup(dev) : true;
|
||||
}
|
||||
|
||||
static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain)
|
||||
static int sh7372_stop_dev(struct device *dev)
|
||||
{
|
||||
return false;
|
||||
int (*stop)(struct device *dev);
|
||||
|
||||
stop = dev_gpd_data(dev)->ops.stop;
|
||||
if (stop) {
|
||||
int ret = stop(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return pm_clk_suspend(dev);
|
||||
}
|
||||
|
||||
struct dev_power_governor sh7372_always_on_gov = {
|
||||
.power_down_ok = sh7372_power_down_forbidden,
|
||||
};
|
||||
static int sh7372_start_dev(struct device *dev)
|
||||
{
|
||||
int (*start)(struct device *dev);
|
||||
int ret;
|
||||
|
||||
ret = pm_clk_resume(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
start = dev_gpd_data(dev)->ops.start;
|
||||
if (start)
|
||||
ret = start(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
|
||||
{
|
||||
struct generic_pm_domain *genpd = &sh7372_pd->genpd;
|
||||
struct dev_power_governor *gov = sh7372_pd->gov;
|
||||
|
||||
pm_genpd_init(genpd, sh7372_pd->gov, false);
|
||||
genpd->stop_device = pm_clk_suspend;
|
||||
genpd->start_device = pm_clk_resume;
|
||||
pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
|
||||
genpd->dev_ops.stop = sh7372_stop_dev;
|
||||
genpd->dev_ops.start = sh7372_start_dev;
|
||||
genpd->dev_ops.active_wakeup = pd_active_wakeup;
|
||||
genpd->dev_irq_safe = true;
|
||||
genpd->active_wakeup = pd_active_wakeup;
|
||||
genpd->power_off = pd_power_down;
|
||||
genpd->power_on = pd_power_up;
|
||||
__pd_power_up(sh7372_pd, false);
|
||||
|
@ -199,48 +222,73 @@ void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
|
|||
}
|
||||
|
||||
struct sh7372_pm_domain sh7372_a4lc = {
|
||||
.genpd.name = "A4LC",
|
||||
.bit_shift = 1,
|
||||
};
|
||||
|
||||
struct sh7372_pm_domain sh7372_a4mp = {
|
||||
.genpd.name = "A4MP",
|
||||
.bit_shift = 2,
|
||||
};
|
||||
|
||||
struct sh7372_pm_domain sh7372_d4 = {
|
||||
.genpd.name = "D4",
|
||||
.bit_shift = 3,
|
||||
};
|
||||
|
||||
struct sh7372_pm_domain sh7372_a4r = {
|
||||
.genpd.name = "A4R",
|
||||
.bit_shift = 5,
|
||||
.gov = &sh7372_always_on_gov,
|
||||
.suspend = sh7372_a4r_suspend,
|
||||
.resume = sh7372_intcs_resume,
|
||||
.stay_on = true,
|
||||
};
|
||||
|
||||
struct sh7372_pm_domain sh7372_a3rv = {
|
||||
.genpd.name = "A3RV",
|
||||
.bit_shift = 6,
|
||||
};
|
||||
|
||||
struct sh7372_pm_domain sh7372_a3ri = {
|
||||
.genpd.name = "A3RI",
|
||||
.bit_shift = 8,
|
||||
};
|
||||
|
||||
struct sh7372_pm_domain sh7372_a3sp = {
|
||||
.bit_shift = 11,
|
||||
.gov = &sh7372_always_on_gov,
|
||||
.no_debug = true,
|
||||
};
|
||||
|
||||
static void sh7372_a3sp_init(void)
|
||||
static int sh7372_a4s_suspend(void)
|
||||
{
|
||||
/* serial consoles make use of SCIF hardware located in A3SP,
|
||||
* keep such power domain on if "no_console_suspend" is set.
|
||||
/*
|
||||
* The A4S domain contains the CPU core and therefore it should
|
||||
* only be turned off if the CPU is in use.
|
||||
*/
|
||||
sh7372_a3sp.stay_on = !console_suspend_enabled;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
struct sh7372_pm_domain sh7372_a4s = {
|
||||
.genpd.name = "A4S",
|
||||
.bit_shift = 10,
|
||||
.gov = &pm_domain_always_on_gov,
|
||||
.no_debug = true,
|
||||
.suspend = sh7372_a4s_suspend,
|
||||
};
|
||||
|
||||
static int sh7372_a3sp_suspend(void)
|
||||
{
|
||||
/*
|
||||
* Serial consoles make use of SCIF hardware located in A3SP,
|
||||
* keep such power domain on if "no_console_suspend" is set.
|
||||
*/
|
||||
return console_suspend_enabled ? -EBUSY : 0;
|
||||
}
|
||||
|
||||
struct sh7372_pm_domain sh7372_a3sp = {
|
||||
.genpd.name = "A3SP",
|
||||
.bit_shift = 11,
|
||||
.gov = &pm_domain_always_on_gov,
|
||||
.no_debug = true,
|
||||
.suspend = sh7372_a3sp_suspend,
|
||||
};
|
||||
|
||||
struct sh7372_pm_domain sh7372_a3sg = {
|
||||
.genpd.name = "A3SG",
|
||||
.bit_shift = 13,
|
||||
};
|
||||
|
||||
|
@ -257,11 +305,16 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void sh7372_enter_core_standby(void)
|
||||
static void sh7372_set_reset_vector(unsigned long address)
|
||||
{
|
||||
/* set reset vector, translate 4k */
|
||||
__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
|
||||
__raw_writel(address, SBAR);
|
||||
__raw_writel(0, APARMBAREA);
|
||||
}
|
||||
|
||||
static void sh7372_enter_core_standby(void)
|
||||
{
|
||||
sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
|
||||
|
||||
/* enter sleep mode with SYSTBCR to 0x10 */
|
||||
__raw_writel(0x10, SYSTBCR);
|
||||
|
@ -274,27 +327,22 @@ static void sh7372_enter_core_standby(void)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
static void sh7372_enter_a3sm_common(int pllc0_on)
|
||||
static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
|
||||
{
|
||||
/* set reset vector, translate 4k */
|
||||
__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
|
||||
__raw_writel(0, APARMBAREA);
|
||||
|
||||
if (pllc0_on)
|
||||
__raw_writel(0, PLLC01STPCR);
|
||||
else
|
||||
__raw_writel(1 << 28, PLLC01STPCR);
|
||||
|
||||
__raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
|
||||
__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
|
||||
cpu_suspend(0, sh7372_do_idle_a3sm);
|
||||
cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
|
||||
__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
|
||||
|
||||
/* disable reset vector translation */
|
||||
__raw_writel(0, SBAR);
|
||||
}
|
||||
|
||||
static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
|
||||
static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
|
||||
{
|
||||
unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
|
||||
unsigned long msk, msk2;
|
||||
|
@ -382,7 +430,7 @@ static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
|
|||
*irqcr2p = irqcr2;
|
||||
}
|
||||
|
||||
static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
|
||||
static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
|
||||
{
|
||||
u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
|
||||
unsigned long tmp;
|
||||
|
@ -415,6 +463,22 @@ static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
|
|||
__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
|
||||
__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
|
||||
}
|
||||
|
||||
static void sh7372_enter_a3sm_common(int pllc0_on)
|
||||
{
|
||||
sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
|
||||
sh7372_enter_sysc(pllc0_on, 1 << 12);
|
||||
}
|
||||
|
||||
static void sh7372_enter_a4s_common(int pllc0_on)
|
||||
{
|
||||
sh7372_intca_suspend();
|
||||
memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
|
||||
sh7372_set_reset_vector(SMFRAM);
|
||||
sh7372_enter_sysc(pllc0_on, 1 << 10);
|
||||
sh7372_intca_resume();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
|
@ -448,14 +512,20 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
|
|||
unsigned long msk, msk2;
|
||||
|
||||
/* check active clocks to determine potential wakeup sources */
|
||||
if (sh7372_a3sm_valid(&msk, &msk2)) {
|
||||
|
||||
if (sh7372_sysc_valid(&msk, &msk2)) {
|
||||
/* convert INTC mask and sense to SYSC mask and sense */
|
||||
sh7372_setup_a3sm(msk, msk2);
|
||||
sh7372_setup_sysc(msk, msk2);
|
||||
|
||||
/* enter A3SM sleep with PLLC0 off */
|
||||
pr_debug("entering A3SM\n");
|
||||
sh7372_enter_a3sm_common(0);
|
||||
if (!console_suspend_enabled &&
|
||||
sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
|
||||
/* enter A4S sleep with PLLC0 off */
|
||||
pr_debug("entering A4S\n");
|
||||
sh7372_enter_a4s_common(0);
|
||||
} else {
|
||||
/* enter A3SM sleep with PLLC0 off */
|
||||
pr_debug("entering A3SM\n");
|
||||
sh7372_enter_a3sm_common(0);
|
||||
}
|
||||
} else {
|
||||
/* default to Core Standby that supports all wakeup sources */
|
||||
pr_debug("entering Core Standby\n");
|
||||
|
@ -464,9 +534,37 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* sh7372_pm_notifier_fn - SH7372 PM notifier routine.
|
||||
* @notifier: Unused.
|
||||
* @pm_event: Event being handled.
|
||||
* @unused: Unused.
|
||||
*/
|
||||
static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
|
||||
unsigned long pm_event, void *unused)
|
||||
{
|
||||
switch (pm_event) {
|
||||
case PM_SUSPEND_PREPARE:
|
||||
/*
|
||||
* This is necessary, because the A4R domain has to be "on"
|
||||
* when suspend_device_irqs() and resume_device_irqs() are
|
||||
* executed during system suspend and resume, respectively, so
|
||||
* that those functions don't crash while accessing the INTCS.
|
||||
*/
|
||||
pm_genpd_poweron(&sh7372_a4r.genpd);
|
||||
break;
|
||||
case PM_POST_SUSPEND:
|
||||
pm_genpd_poweroff_unused();
|
||||
break;
|
||||
}
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static void sh7372_suspend_init(void)
|
||||
{
|
||||
shmobile_suspend_ops.enter = sh7372_enter_suspend;
|
||||
pm_notifier(sh7372_pm_notifier_fn, 0);
|
||||
}
|
||||
#else
|
||||
static void sh7372_suspend_init(void) {}
|
||||
|
@ -482,8 +580,6 @@ void __init sh7372_pm_init(void)
|
|||
/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
|
||||
__raw_writel(0, PDNSEL);
|
||||
|
||||
sh7372_a3sp_init();
|
||||
|
||||
sh7372_suspend_init();
|
||||
sh7372_cpuidle_init();
|
||||
}
|
||||
|
|
|
@ -994,12 +994,16 @@ void __init sh7372_add_standard_devices(void)
|
|||
sh7372_init_pm_domain(&sh7372_a4r);
|
||||
sh7372_init_pm_domain(&sh7372_a3rv);
|
||||
sh7372_init_pm_domain(&sh7372_a3ri);
|
||||
sh7372_init_pm_domain(&sh7372_a3sg);
|
||||
sh7372_init_pm_domain(&sh7372_a4s);
|
||||
sh7372_init_pm_domain(&sh7372_a3sp);
|
||||
sh7372_init_pm_domain(&sh7372_a3sg);
|
||||
|
||||
sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
|
||||
sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);
|
||||
|
||||
sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg);
|
||||
sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp);
|
||||
|
||||
platform_add_devices(sh7372_early_devices,
|
||||
ARRAY_SIZE(sh7372_early_devices));
|
||||
|
||||
|
|
|
@ -37,13 +37,18 @@
|
|||
#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
|
||||
.align 12
|
||||
.text
|
||||
.global sh7372_resume_core_standby_a3sm
|
||||
sh7372_resume_core_standby_a3sm:
|
||||
.global sh7372_resume_core_standby_sysc
|
||||
sh7372_resume_core_standby_sysc:
|
||||
ldr pc, 1f
|
||||
1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
|
||||
|
||||
.global sh7372_do_idle_a3sm
|
||||
sh7372_do_idle_a3sm:
|
||||
#define SPDCR 0xe6180008
|
||||
|
||||
/* A3SM & A4S power down */
|
||||
.global sh7372_do_idle_sysc
|
||||
sh7372_do_idle_sysc:
|
||||
mov r8, r0 /* sleep mode passed in r0 */
|
||||
|
||||
/*
|
||||
* Clear the SCTLR.C bit to prevent further data cache
|
||||
* allocation. Clearing SCTLR.C would make all the data accesses
|
||||
|
@ -80,13 +85,9 @@ sh7372_do_idle_a3sm:
|
|||
dsb
|
||||
dmb
|
||||
|
||||
#define SPDCR 0xe6180008
|
||||
#define A3SM (1 << 12)
|
||||
|
||||
/* A3SM power down */
|
||||
/* SYSC power down */
|
||||
ldr r0, =SPDCR
|
||||
ldr r1, =A3SM
|
||||
str r1, [r0]
|
||||
str r8, [r0]
|
||||
1:
|
||||
b 1b
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ struct device;
|
|||
#ifdef CONFIG_PM
|
||||
|
||||
extern __init int s3c_pm_init(void);
|
||||
extern __init int s3c64xx_pm_init(void);
|
||||
|
||||
#else
|
||||
|
||||
|
@ -29,6 +30,11 @@ static inline int s3c_pm_init(void)
|
|||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int s3c64xx_pm_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* configuration for the IRQ mask over sleep */
|
||||
|
|
|
@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
|
||||
#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
|
||||
#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
|
||||
#define TIF_FREEZE 29
|
||||
#define TIF_DEBUG 30 /* debugging enabled */
|
||||
#define TIF_USERSPACE 31 /* true if FS sets userspace */
|
||||
|
||||
|
@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
/* Note: The masks below must never span more than 16 bits! */
|
||||
|
||||
|
|
|
@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_FREEZE 6 /* is freezing for suspend */
|
||||
#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
|
||||
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
|
||||
#define TIF_SINGLESTEP 9
|
||||
|
@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
|
||||
|
|
|
@ -86,7 +86,6 @@ struct thread_info {
|
|||
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 18 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
|
@ -94,7 +93,6 @@ struct thread_info {
|
|||
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
|
|
@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
|
|||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 18 /* freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
|
@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
|
|||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
|
|
@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
|
||||
#define TIF_FREEZE 16 /* is freezing for suspend */
|
||||
|
||||
/* as above, but as bit values */
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
|
@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
|
||||
|
|
|
@ -113,7 +113,6 @@ struct thread_info {
|
|||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
|
||||
#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
|
||||
#define TIF_FREEZE 20 /* is freezing for suspend */
|
||||
#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
|
@ -126,7 +125,6 @@ struct thread_info {
|
|||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
|
||||
#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
|
||||
|
||||
/* "work to do on user-return" bits */
|
||||
|
|
|
@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
|
|||
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
|
||||
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
|
@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
|
|||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
|
|
@ -76,7 +76,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_DELAYED_TRACE 14 /* single step a syscall */
|
||||
#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
|
||||
#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 17 /* thread is freezing for suspend */
|
||||
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
|
||||
|
||||
#endif /* _ASM_M68K_THREAD_INFO_H */
|
||||
|
|
|
@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
|
||||
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
|
||||
#define TIF_SECCOMP 10 /* secure computing */
|
||||
#define TIF_FREEZE 14 /* Freezing for suspend */
|
||||
|
||||
/* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_POLLING_NRFLAG 16
|
||||
|
@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_IRET (1 << TIF_IRET)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
|
||||
|
|
|
@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
|
|||
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
|
||||
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19
|
||||
#define TIF_FIXADE 20 /* Fix address errors in software */
|
||||
#define TIF_LOGADE 21 /* Log address errors to syslog */
|
||||
#define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
|
||||
|
@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
|
|||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
#define _TIF_FIXADE (1<<TIF_FIXADE)
|
||||
#define _TIF_LOGADE (1<<TIF_LOGADE)
|
||||
#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
|
||||
|
|
|
@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
|
|||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 18 /* freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
|
||||
|
@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
|
|||
#define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
|
||||
#define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE +(1 << TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
|
|
@ -58,7 +58,6 @@ struct thread_info {
|
|||
#define TIF_32BIT 4 /* 32 bit binary */
|
||||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
|
||||
#define TIF_FREEZE 7 /* is freezing for suspend */
|
||||
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
|
||||
#define TIF_SINGLESTEP 9 /* single stepping? */
|
||||
#define TIF_BLOCKSTEP 10 /* branch stepping? */
|
||||
|
@ -69,7 +68,6 @@ struct thread_info {
|
|||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_32BIT (1 << TIF_32BIT)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
|
||||
|
|
|
@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
|
||||
#define TIF_NOERROR 12 /* Force successful syscall return */
|
||||
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
|
||||
#define TIF_FREEZE 14 /* Freezing for suspend */
|
||||
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
|
||||
#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
|
||||
|
||||
|
@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
|
||||
#define _TIF_NOERROR (1<<TIF_NOERROR)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
|
||||
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
|
|
|
@@ -1406,7 +1406,6 @@ static struct bus_type vio_bus_type = {
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
	.pm = GENERIC_SUBSYS_PM_OPS,
};

/**

@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
|
||||
#define TIF_SINGLE_STEP 20 /* This task is single stepped */
|
||||
#define TIF_FREEZE 21 /* thread is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL (1<<TIF_SYSCALL)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
|
@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_31BIT (1<<TIF_31BIT)
|
||||
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
|
||||
|
|
|
@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
|
|||
#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
|
||||
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19 /* Freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
|
@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
|
|||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
/*
|
||||
* _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
|
||||
|
|
|
@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
|
|||
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
|
||||
* TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 11 /* is freezing for suspend */
|
||||
|
||||
/* as above, but as bit values */
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
|
@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
|
|||
#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
|
||||
_TIF_SIGPENDING | \
|
||||
_TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
|
|
|
@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
|
|||
/* flag bit 12 is available */
|
||||
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
|
||||
#define TIF_POLLING_NRFLAG 14
|
||||
#define TIF_FREEZE 15 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
|
@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
|
|||
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
|
||||
_TIF_DO_NOTIFY_RESUME_MASK | \
|
||||
|
|
|
@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||
#define TIF_SYSCALL_AUDIT 6
|
||||
#define TIF_RESTORE_SIGMASK 7
|
||||
#define TIF_FREEZE 16 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
|
@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
|
||||
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
#endif
|
||||
|
|
|
@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
|
||||
#define TIF_SYSCALL_TRACE 8
|
||||
#define TIF_MEMDIE 18
|
||||
#define TIF_FREEZE 19
|
||||
#define TIF_RESTORE_SIGMASK 20
|
||||
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
|
||||
/*
|
||||
|
|
|
@ -91,7 +91,6 @@ struct thread_info {
|
|||
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
|
||||
#define TIF_DEBUG 21 /* uses debug registers */
|
||||
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
|
||||
#define TIF_FREEZE 23 /* is freezing for suspend */
|
||||
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
|
||||
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
|
||||
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
|
||||
|
@ -113,7 +112,6 @@ struct thread_info {
|
|||
#define _TIF_FORK (1 << TIF_FORK)
|
||||
#define _TIF_DEBUG (1 << TIF_DEBUG)
|
||||
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
|
||||
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
|
||||
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
|
||||
|
|
|
@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_FREEZE 17 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
|
@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
|
|||
#define _TIF_IRET (1<<TIF_IRET)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
|
|
@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54HR",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
		},
	},
	{},
};
#endif /* CONFIG_SUSPEND */

@ -113,31 +113,7 @@ static int amba_legacy_resume(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_prepare(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (drv && drv->pm && drv->pm->prepare)
|
||||
ret = drv->pm->prepare(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void amba_pm_complete(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
if (drv && drv->pm && drv->pm->complete)
|
||||
drv->pm->complete(dev);
|
||||
}
|
||||
|
||||
#else /* !CONFIG_PM_SLEEP */
|
||||
|
||||
#define amba_pm_prepare NULL
|
||||
#define amba_pm_complete NULL
|
||||
|
||||
#endif /* !CONFIG_PM_SLEEP */
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
|
||||
|
@ -159,22 +135,6 @@ static int amba_pm_suspend(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_suspend_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->suspend_noirq)
|
||||
ret = drv->pm->suspend_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_resume(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -193,28 +153,10 @@ static int amba_pm_resume(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_resume_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->resume_noirq)
|
||||
ret = drv->pm->resume_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SUSPEND */
|
||||
|
||||
#define amba_pm_suspend NULL
|
||||
#define amba_pm_resume NULL
|
||||
#define amba_pm_suspend_noirq NULL
|
||||
#define amba_pm_resume_noirq NULL
|
||||
|
||||
#endif /* !CONFIG_SUSPEND */
|
||||
|
||||
|
@ -238,22 +180,6 @@ static int amba_pm_freeze(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_freeze_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->freeze_noirq)
|
||||
ret = drv->pm->freeze_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_thaw(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -272,22 +198,6 @@ static int amba_pm_thaw(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_thaw_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->thaw_noirq)
|
||||
ret = drv->pm->thaw_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_poweroff(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -306,22 +216,6 @@ static int amba_pm_poweroff(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_poweroff_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->poweroff_noirq)
|
||||
ret = drv->pm->poweroff_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_restore(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -340,32 +234,12 @@ static int amba_pm_restore(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_restore_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->restore_noirq)
|
||||
ret = drv->pm->restore_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
#define amba_pm_freeze NULL
|
||||
#define amba_pm_thaw NULL
|
||||
#define amba_pm_poweroff NULL
|
||||
#define amba_pm_restore NULL
|
||||
#define amba_pm_freeze_noirq NULL
|
||||
#define amba_pm_thaw_noirq NULL
|
||||
#define amba_pm_poweroff_noirq NULL
|
||||
#define amba_pm_restore_noirq NULL
|
||||
|
||||
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
|
@@ -406,20 +280,12 @@ static int amba_pm_runtime_resume(struct device *dev)
#ifdef CONFIG_PM

static const struct dev_pm_ops amba_pm = {
	.prepare = amba_pm_prepare,
	.complete = amba_pm_complete,
	.suspend = amba_pm_suspend,
	.resume = amba_pm_resume,
	.freeze = amba_pm_freeze,
	.thaw = amba_pm_thaw,
	.poweroff = amba_pm_poweroff,
	.restore = amba_pm_restore,
	.suspend_noirq = amba_pm_suspend_noirq,
	.resume_noirq = amba_pm_resume_noirq,
	.freeze_noirq = amba_pm_freeze_noirq,
	.thaw_noirq = amba_pm_thaw_noirq,
	.poweroff_noirq = amba_pm_poweroff_noirq,
	.restore_noirq = amba_pm_restore_noirq,
	SET_RUNTIME_PM_OPS(
		amba_pm_runtime_suspend,
		amba_pm_runtime_resume,
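With the forward-only wrappers dropped from the bus table above, the callbacks an AMBA device actually runs come from its own driver when the subsystem provides none. A minimal driver-side table, purely as a sketch (driver name and callback bodies are placeholders, not taken from this series):

#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hypothetical device */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring it back up */
	return 0;
}

/* One table serves suspend/resume and is reused for hibernation. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
/* wired up via .drv = { .pm = &foo_pm_ops } in the driver's struct amba_driver */
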
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
		return 0;
	}

	read_lock_usermodehelper();

	if (WARN_ON(usermodehelper_is_disabled())) {
		dev_err(device, "firmware: %s will not be loaded\n", name);
		retval = -EBUSY;

@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
	fw_destroy_instance(fw_priv);

 out:
	read_unlock_usermodehelper();

	if (retval) {
		release_firmware(firmware);
		*firmware_p = NULL;
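The firmware loader now brackets the whole user-space request with read_lock_usermodehelper()/read_unlock_usermodehelper(), so the freezer cannot disable usermode helpers halfway through a load. Reduced to its skeleton, and assuming the 3.3-era declarations live in linux/kmod.h, the pattern is:

#include <linux/device.h>
#include <linux/kmod.h>

static int load_with_helper(struct device *dev, const char *name)
{
	int retval = 0;

	read_lock_usermodehelper();

	if (WARN_ON(usermodehelper_is_disabled())) {
		dev_err(dev, "firmware: %s will not be loaded\n", name);
		retval = -EBUSY;
		goto out;
	}

	/* ... fire the uevent and wait for user space here ... */

out:
	read_unlock_usermodehelper();
	return retval;
}
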
@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_prepare(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (drv && drv->pm && drv->pm->prepare)
|
||||
ret = drv->pm->prepare(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void platform_pm_complete(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
if (drv && drv->pm && drv->pm->complete)
|
||||
drv->pm->complete(dev);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
|
@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_suspend_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->suspend_noirq)
|
||||
ret = drv->pm->suspend_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_resume(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_resume_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->resume_noirq)
|
||||
ret = drv->pm->resume_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
|
||||
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
||||
|
@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_freeze_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->freeze_noirq)
|
||||
ret = drv->pm->freeze_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_thaw(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_thaw_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->thaw_noirq)
|
||||
ret = drv->pm->thaw_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_poweroff(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_poweroff_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->poweroff_noirq)
|
||||
ret = drv->pm->poweroff_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_restore(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_restore_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->restore_noirq)
|
||||
ret = drv->pm->restore_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
static const struct dev_pm_ops platform_dev_pm_ops = {
|
||||
|
|
|
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o

ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG

@@ -15,13 +15,44 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);	\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
	if (__elapsed > __gpd_data->td.field) {					\
		__gpd_data->td.field = __elapsed;				\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
	}									\
	__retval;								\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM

static struct generic_pm_domain *dev_to_genpd(struct device *dev)
struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);
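GENPD_DEV_CALLBACK() above resolves a callback in two steps: the domain-wide entry in genpd->dev_ops wins, and only if it is unset does the per-device copy in dev_gpd_data(dev)->ops get used, with a harmless 0 when neither exists. The same two-level dispatch as a small self-contained C program (struct and function names here are illustrative only, not kernel API):

#include <stdio.h>

struct dev;				/* stand-in for struct device */
typedef int (*dev_cb)(struct dev *d);

struct ops { dev_cb suspend; };		/* stand-in for struct gpd_dev_ops */
struct domain { struct ops dev_ops; };	/* stand-in for struct generic_pm_domain */
struct dev { struct ops ops; struct domain *pd; };

/* Same precedence as GENPD_DEV_CALLBACK: domain-wide op first, then the
 * per-device op, and 0 when neither is set. */
static int call_suspend(struct dev *d)
{
	dev_cb cb = d->pd->dev_ops.suspend;

	if (!cb)
		cb = d->ops.suspend;
	return cb ? cb(d) : 0;
}

static int my_suspend(struct dev *d)
{
	(void)d;
	puts("device-level op used");
	return 0;
}

int main(void)
{
	struct domain pd = { .dev_ops = { .suspend = NULL } };
	struct dev d = { .ops = { .suspend = my_suspend }, .pd = &pd };

	return call_suspend(&d);	/* falls back to the device op */
}
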
@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
|
|||
return pd_to_genpd(dev->pm_domain);
|
||||
}
|
||||
|
||||
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
|
||||
stop_latency_ns, "stop");
|
||||
}
|
||||
|
||||
static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
|
||||
start_latency_ns, "start");
|
||||
}
|
||||
|
||||
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
|
||||
save_state_latency_ns, "state save");
|
||||
}
|
||||
|
||||
static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
|
||||
restore_state_latency_ns,
|
||||
"state restore");
|
||||
}
|
||||
|
||||
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
|
||||
{
|
||||
bool ret = false;
|
||||
|
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd_set_active(genpd);
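The power-on path above times genpd->power_on() and ratchets power_on_latency_ns up whenever a call beats the recorded worst case, warning as the watermark grows. The update itself is a few lines; a user-space model of it (field and function names invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Keep a running worst-case latency, warning whenever it grows (model only). */
static void update_latency_watermark(const char *name, int64_t *worst_ns,
				     int64_t elapsed_ns)
{
	if (elapsed_ns > *worst_ns) {
		*worst_ns = elapsed_ns;
		printf("%s: latency exceeded, new value %lld ns\n",
		       name, (long long)elapsed_ns);
	}
}

int main(void)
{
	int64_t power_on_latency_ns = 0;

	update_latency_watermark("A4S", &power_on_latency_ns, 1200000); /* grows */
	update_latency_watermark("A4S", &power_on_latency_ns,  800000); /* kept  */
	return 0;
}
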
@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
|
|||
{
|
||||
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
|
||||
struct device *dev = pdd->dev;
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (gpd_data->need_restore)
|
||||
|
@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
|
|||
|
||||
mutex_unlock(&genpd->lock);
|
||||
|
||||
if (drv && drv->pm && drv->pm->runtime_suspend) {
|
||||
if (genpd->start_device)
|
||||
genpd->start_device(dev);
|
||||
|
||||
ret = drv->pm->runtime_suspend(dev);
|
||||
|
||||
if (genpd->stop_device)
|
||||
genpd->stop_device(dev);
|
||||
}
|
||||
genpd_start_dev(genpd, dev);
|
||||
ret = genpd_save_dev(genpd, dev);
|
||||
genpd_stop_dev(genpd, dev);
|
||||
|
||||
mutex_lock(&genpd->lock);
|
||||
|
||||
|
@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
|
|||
{
|
||||
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
|
||||
struct device *dev = pdd->dev;
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
if (!gpd_data->need_restore)
|
||||
return;
|
||||
|
||||
mutex_unlock(&genpd->lock);
|
||||
|
||||
if (drv && drv->pm && drv->pm->runtime_resume) {
|
||||
if (genpd->start_device)
|
||||
genpd->start_device(dev);
|
||||
|
||||
drv->pm->runtime_resume(dev);
|
||||
|
||||
if (genpd->stop_device)
|
||||
genpd->stop_device(dev);
|
||||
}
|
||||
genpd_start_dev(genpd, dev);
|
||||
genpd_restore_dev(genpd, dev);
|
||||
genpd_stop_dev(genpd, dev);
|
||||
|
||||
mutex_lock(&genpd->lock);
|
||||
|
||||
|
@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
|
|||
}
|
||||
|
||||
if (genpd->power_off) {
|
||||
ktime_t time_start;
|
||||
s64 elapsed_ns;
|
||||
|
||||
if (atomic_read(&genpd->sd_count) > 0) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
time_start = ktime_get();
|
||||
|
||||
/*
|
||||
* If sd_count > 0 at this point, one of the subdomains hasn't
|
||||
* managed to call pm_genpd_poweron() for the master yet after
|
||||
|
@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
|
|||
genpd_set_active(genpd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
|
||||
if (elapsed_ns > genpd->power_off_latency_ns) {
|
||||
genpd->power_off_latency_ns = elapsed_ns;
|
||||
if (genpd->name)
|
||||
pr_warning("%s: Power-off latency exceeded, "
|
||||
"new value %lld ns\n", genpd->name,
|
||||
elapsed_ns);
|
||||
}
|
||||
}
|
||||
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
genpd->power_off_time = ktime_get();
|
||||
|
||||
/* Update PM QoS information for devices in the domain. */
|
||||
list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
|
||||
struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
|
||||
|
||||
pm_runtime_update_max_time_suspended(pdd->dev,
|
||||
td->start_latency_ns +
|
||||
td->restore_state_latency_ns +
|
||||
genpd->power_on_latency_ns);
|
||||
}
|
||||
|
||||
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||
genpd_sd_counter_dec(link->master);
|
||||
|
@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
|
|||
static int pm_genpd_runtime_suspend(struct device *dev)
|
||||
{
|
||||
struct generic_pm_domain *genpd;
|
||||
bool (*stop_ok)(struct device *__dev);
|
||||
int ret;
|
||||
|
||||
dev_dbg(dev, "%s()\n", __func__);
|
||||
|
||||
|
@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
|
|||
|
||||
might_sleep_if(!genpd->dev_irq_safe);
|
||||
|
||||
if (genpd->stop_device) {
|
||||
int ret = genpd->stop_device(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
|
||||
if (stop_ok && !stop_ok(dev))
|
||||
return -EBUSY;
|
||||
|
||||
ret = genpd_stop_dev(genpd, dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pm_runtime_update_max_time_suspended(dev,
|
||||
dev_gpd_data(dev)->td.start_latency_ns);
|
||||
|
||||
/*
|
||||
* If power.irq_safe is set, this routine will be run with interrupts
|
||||
|
@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
|
|||
mutex_unlock(&genpd->lock);
|
||||
|
||||
out:
|
||||
if (genpd->start_device)
|
||||
genpd->start_device(dev);
|
||||
genpd_start_dev(genpd, dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
|
|||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
||||
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
|
||||
struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
|
||||
}
|
||||
|
||||
static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
|
||||
}
|
||||
|
||||
static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
|
||||
}
|
||||
|
||||
static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
|
||||
}
|
||||
|
||||
static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
|
||||
}
|
||||
|
||||
static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
|
||||
}
|
||||
|
||||
static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
|
||||
}
|
||||
|
||||
static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
|
||||
}
|
||||
|
||||
static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
|
||||
{
|
||||
return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
|
||||
* @genpd: PM domain to power off, if possible.
|
||||
|
@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
|
|||
if (!device_can_wakeup(dev))
|
||||
return false;
|
||||
|
||||
active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
|
||||
active_wakeup = genpd_dev_active_wakeup(genpd, dev);
|
||||
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
|
||||
}
|
||||
|
||||
|
@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
|
|||
/*
|
||||
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
|
||||
* so pm_genpd_poweron() will return immediately, but if the device
|
||||
* is suspended (e.g. it's been stopped by .stop_device()), we need
|
||||
* is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
|
||||
* to make it operational.
|
||||
*/
|
||||
pm_runtime_resume(dev);
|
||||
|
@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
|
|||
if (IS_ERR(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
|
||||
return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
|
|||
if (genpd->suspend_power_off)
|
||||
return 0;
|
||||
|
||||
ret = pm_generic_suspend_noirq(dev);
|
||||
ret = genpd_suspend_late(genpd, dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (dev->power.wakeup_path
|
||||
&& genpd->active_wakeup && genpd->active_wakeup(dev))
|
||||
if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
|
||||
return 0;
|
||||
|
||||
if (genpd->stop_device)
|
||||
genpd->stop_device(dev);
|
||||
genpd_stop_dev(genpd, dev);
|
||||
|
||||
/*
|
||||
* Since all of the "noirq" callbacks are executed sequentially, it is
|
||||
|
@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
|
|||
*/
|
||||
pm_genpd_poweron(genpd);
|
||||
genpd->suspended_count--;
|
||||
if (genpd->start_device)
|
||||
genpd->start_device(dev);
|
||||
genpd_start_dev(genpd, dev);
|
||||
|
||||
return pm_generic_resume_noirq(dev);
|
||||
return genpd_resume_early(genpd, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
|
|||
if (IS_ERR(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
|
||||
return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
|
|||
if (IS_ERR(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
|
||||
return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
|
|||
if (genpd->suspend_power_off)
|
||||
return 0;
|
||||
|
||||
ret = pm_generic_freeze_noirq(dev);
|
||||
ret = genpd_freeze_late(genpd, dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (genpd->stop_device)
|
||||
genpd->stop_device(dev);
|
||||
genpd_stop_dev(genpd, dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
|
|||
if (genpd->suspend_power_off)
|
||||
return 0;
|
||||
|
||||
if (genpd->start_device)
|
||||
genpd->start_device(dev);
|
||||
genpd_start_dev(genpd, dev);
|
||||
|
||||
return pm_generic_thaw_noirq(dev);
|
||||
return genpd_thaw_early(genpd, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
|
|||
if (IS_ERR(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
|
||||
* @dev: Device to suspend.
|
||||
*
|
||||
* Power off a device under the assumption that its pm_domain field points to
|
||||
* the domain member of an object of type struct generic_pm_domain representing
|
||||
* a PM domain consisting of I/O devices.
|
||||
*/
|
||||
static int pm_genpd_dev_poweroff(struct device *dev)
|
||||
{
|
||||
struct generic_pm_domain *genpd;
|
||||
|
||||
dev_dbg(dev, "%s()\n", __func__);
|
||||
|
||||
genpd = dev_to_genpd(dev);
|
||||
if (IS_ERR(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
|
||||
* @dev: Device to suspend.
|
||||
*
|
||||
* Carry out a late powering off of a device under the assumption that its
|
||||
* pm_domain field points to the domain member of an object of type
|
||||
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
|
||||
*/
|
||||
static int pm_genpd_dev_poweroff_noirq(struct device *dev)
|
||||
{
|
||||
struct generic_pm_domain *genpd;
|
||||
int ret;
|
||||
|
||||
dev_dbg(dev, "%s()\n", __func__);
|
||||
|
||||
genpd = dev_to_genpd(dev);
|
||||
if (IS_ERR(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
if (genpd->suspend_power_off)
|
||||
return 0;
|
||||
|
||||
ret = pm_generic_poweroff_noirq(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (dev->power.wakeup_path
|
||||
&& genpd->active_wakeup && genpd->active_wakeup(dev))
|
||||
return 0;
|
||||
|
||||
if (genpd->stop_device)
|
||||
genpd->stop_device(dev);
|
||||
|
||||
/*
|
||||
* Since all of the "noirq" callbacks are executed sequentially, it is
|
||||
* guaranteed that this function will never run twice in parallel for
|
||||
* the same PM domain, so it is not necessary to use locking here.
|
||||
*/
|
||||
genpd->suspended_count++;
|
||||
pm_genpd_sync_poweroff(genpd);
|
||||
|
||||
return 0;
|
||||
return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
|
|||
|
||||
pm_genpd_poweron(genpd);
|
||||
genpd->suspended_count--;
|
||||
if (genpd->start_device)
|
||||
genpd->start_device(dev);
|
||||
genpd_start_dev(genpd, dev);
|
||||
|
||||
return pm_generic_restore_noirq(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_restore - Restore a device belonging to an I/O power domain.
|
||||
* @dev: Device to resume.
|
||||
*
|
||||
* Restore a device under the assumption that its pm_domain field points to the
|
||||
* domain member of an object of type struct generic_pm_domain representing
|
||||
* a power domain consisting of I/O devices.
|
||||
*/
|
||||
static int pm_genpd_restore(struct device *dev)
|
||||
{
|
||||
struct generic_pm_domain *genpd;
|
||||
|
||||
dev_dbg(dev, "%s()\n", __func__);
|
||||
|
||||
genpd = dev_to_genpd(dev);
|
||||
if (IS_ERR(genpd))
|
||||
return -EINVAL;
|
||||
|
||||
return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
|
||||
return genpd_resume_early(genpd, dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
|
|||
#define pm_genpd_freeze_noirq NULL
|
||||
#define pm_genpd_thaw_noirq NULL
|
||||
#define pm_genpd_thaw NULL
|
||||
#define pm_genpd_dev_poweroff_noirq NULL
|
||||
#define pm_genpd_dev_poweroff NULL
|
||||
#define pm_genpd_restore_noirq NULL
|
||||
#define pm_genpd_restore NULL
|
||||
#define pm_genpd_complete NULL
|
||||
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
/**
|
||||
* pm_genpd_add_device - Add a device to an I/O PM domain.
|
||||
* __pm_genpd_add_device - Add a device to an I/O PM domain.
|
||||
* @genpd: PM domain to add the device to.
|
||||
* @dev: Device to be added.
|
||||
* @td: Set of PM QoS timing parameters to attach to the device.
|
||||
*/
|
||||
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
|
||||
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
|
||||
struct gpd_timing_data *td)
|
||||
{
|
||||
struct generic_pm_domain_data *gpd_data;
|
||||
struct pm_domain_data *pdd;
|
||||
|
@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
|
|||
gpd_data->base.dev = dev;
|
||||
gpd_data->need_restore = false;
|
||||
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
|
||||
if (td)
|
||||
gpd_data->td = *td;
|
||||
|
||||
out:
|
||||
genpd_release_lock(genpd);
|
||||
|
@@ -1279,6 +1319,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct pm_domain_data *pdd;
	int ret = 0;

	if (!(dev && dev->power.subsys_data && ops))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	pdd = dev->power.subsys_data->domain_data;
	if (pdd) {
		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);

		gpd_data->ops = *ops;
		if (td)
			gpd_data->td = *td;
	} else {
		ret = -EINVAL;
	}

	device_pm_unlock();
	pm_runtime_enable(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
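Platform code that wants device-specific domain callbacks fills a gpd_dev_ops and registers it with pm_genpd_add_callbacks(); passing NULL timing data keeps the defaults. A sketch of a caller, with the callback bodies and names left hypothetical:

#include <linux/pm_domain.h>

static int my_dev_suspend(struct device *dev)
{
	/* device-specific step used instead of the domain default */
	return 0;
}

static int my_dev_resume(struct device *dev)
{
	return 0;
}

static struct gpd_dev_ops my_dev_gpd_ops = {
	.suspend = my_dev_suspend,
	.resume  = my_dev_resume,
};

/* dev must already belong to a generic PM domain (subsys_data set up). */
static int my_register_domain_ops(struct device *dev)
{
	return pm_genpd_add_callbacks(dev, &my_dev_gpd_ops, NULL);
}
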
/**
|
||||
* __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
|
||||
* @dev: Device to remove the callbacks from.
|
||||
* @clear_td: If set, clear the device's timing data too.
|
||||
*/
|
||||
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
|
||||
{
|
||||
struct pm_domain_data *pdd;
|
||||
int ret = 0;
|
||||
|
||||
if (!(dev && dev->power.subsys_data))
|
||||
return -EINVAL;
|
||||
|
||||
pm_runtime_disable(dev);
|
||||
device_pm_lock();
|
||||
|
||||
pdd = dev->power.subsys_data->domain_data;
|
||||
if (pdd) {
|
||||
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
|
||||
|
||||
gpd_data->ops = (struct gpd_dev_ops){ 0 };
|
||||
if (clear_td)
|
||||
gpd_data->td = (struct gpd_timing_data){ 0 };
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
device_pm_unlock();
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
|
||||
|
||||
/* Default device callbacks for generic PM domains. */
|
||||
|
||||
/**
|
||||
* pm_genpd_default_save_state - Default "save device state" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_save_state(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev);
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
cb = dev_gpd_data(dev)->ops.save_state;
|
||||
if (cb)
|
||||
return cb(dev);
|
||||
|
||||
if (drv && drv->pm && drv->pm->runtime_suspend)
|
||||
return drv->pm->runtime_suspend(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_restore_state - Default PM domians "restore device state".
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_restore_state(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev);
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
cb = dev_gpd_data(dev)->ops.restore_state;
|
||||
if (cb)
|
||||
return cb(dev);
|
||||
|
||||
if (drv && drv->pm && drv->pm->runtime_resume)
|
||||
return drv->pm->runtime_resume(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_suspend - Default "device suspend" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_suspend(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_suspend(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_suspend_late(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_resume_early - Default "early device resume" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_resume_early(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_resume_noirq(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_resume - Default "device resume" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_resume(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_resume(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_freeze - Default "device freeze" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_freeze(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_freeze(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_freeze_late(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_thaw_early(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_default_thaw - Default "device thaw" for PM domians.
|
||||
* @dev: Device to handle.
|
||||
*/
|
||||
static int pm_genpd_default_thaw(struct device *dev)
|
||||
{
|
||||
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
|
||||
|
||||
return cb ? cb(dev) : pm_generic_thaw(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_genpd_init - Initialize a generic I/O PM domain object.
|
||||
* @genpd: PM domain object to initialize.
|
||||
|
@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
|
|||
genpd->resume_count = 0;
|
||||
genpd->device_count = 0;
|
||||
genpd->suspended_count = 0;
|
||||
genpd->max_off_time_ns = -1;
|
||||
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
|
||||
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
|
||||
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
|
||||
|
@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
|
|||
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
|
||||
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
|
||||
genpd->domain.ops.thaw = pm_genpd_thaw;
|
||||
genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
|
||||
genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
|
||||
genpd->domain.ops.poweroff = pm_genpd_suspend;
|
||||
genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
|
||||
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
|
||||
genpd->domain.ops.restore = pm_genpd_restore;
|
||||
genpd->domain.ops.restore = pm_genpd_resume;
|
||||
genpd->domain.ops.complete = pm_genpd_complete;
|
||||
genpd->dev_ops.save_state = pm_genpd_default_save_state;
|
||||
genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
|
||||
genpd->dev_ops.suspend = pm_genpd_default_suspend;
|
||||
genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
|
||||
genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
|
||||
genpd->dev_ops.resume = pm_genpd_default_resume;
|
||||
genpd->dev_ops.freeze = pm_genpd_default_freeze;
|
||||
genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
|
||||
genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
|
||||
genpd->dev_ops.thaw = pm_genpd_default_thaw;
|
||||
mutex_lock(&gpd_list_lock);
|
||||
list_add(&genpd->gpd_list_node, &gpd_list);
|
||||
mutex_unlock(&gpd_list_lock);
|
||||
|
|
drivers/base/power/domain_governor.c (new file, 156 lines)
@@ -0,0 +1,156 @@
/*
 * drivers/base/power/domain_governor.c - Governors for device PM domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>

/**
 * default_stop_ok - Default PM domain governor routine for stopping devices.
 * @dev: Device to check.
 */
bool default_stop_ok(struct device *dev)
{
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;

	dev_dbg(dev, "%s()\n", __func__);

	if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
		return true;

	return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
		&& td->break_even_ns < dev->power.max_time_suspended_ns;
}
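default_stop_ok() only allows a device to be stopped when stopping and restarting it costs less than its break-even time, and that break-even time in turn fits inside the permitted suspend window. The same test on plain numbers (values below are chosen purely for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Same check as default_stop_ok(), with all quantities in nanoseconds. */
static bool stop_ok(long long stop_ns, long long start_ns,
		    long long break_even_ns, long long max_suspend_ns)
{
	if (max_suspend_ns < 0 || break_even_ns == 0)
		return true;
	return stop_ns + start_ns < break_even_ns
		&& break_even_ns < max_suspend_ns;
}

int main(void)
{
	/* 30 us stop + 50 us start < 100 us break-even < 1 ms window */
	printf("%d\n", stop_ok(30000, 50000, 100000, 1000000)); /* 1: stop it   */
	printf("%d\n", stop_ok(70000, 50000, 100000, 1000000)); /* 0: not worth */
	return 0;
}
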
/**
|
||||
* default_power_down_ok - Default generic PM domain power off governor routine.
|
||||
* @pd: PM domain to check.
|
||||
*
|
||||
* This routine must be executed under the PM domain's lock.
|
||||
*/
|
||||
static bool default_power_down_ok(struct dev_pm_domain *pd)
|
||||
{
|
||||
struct generic_pm_domain *genpd = pd_to_genpd(pd);
|
||||
struct gpd_link *link;
|
||||
struct pm_domain_data *pdd;
|
||||
s64 min_dev_off_time_ns;
|
||||
s64 off_on_time_ns;
|
||||
ktime_t time_now = ktime_get();
|
||||
|
||||
off_on_time_ns = genpd->power_off_latency_ns +
|
||||
genpd->power_on_latency_ns;
|
||||
/*
|
||||
* It doesn't make sense to remove power from the domain if saving
|
||||
* the state of all devices in it and the power off/power on operations
|
||||
* take too much time.
|
||||
*
|
||||
* All devices in this domain have been stopped already at this point.
|
||||
*/
|
||||
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
|
||||
if (pdd->dev->driver)
|
||||
off_on_time_ns +=
|
||||
to_gpd_data(pdd)->td.save_state_latency_ns;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if subdomains can be off for enough time.
|
||||
*
|
||||
* All subdomains have been powered off already at this point.
|
||||
*/
|
||||
list_for_each_entry(link, &genpd->master_links, master_node) {
|
||||
struct generic_pm_domain *sd = link->slave;
|
||||
s64 sd_max_off_ns = sd->max_off_time_ns;
|
||||
|
||||
if (sd_max_off_ns < 0)
|
||||
continue;
|
||||
|
||||
sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
|
||||
sd->power_off_time));
|
||||
/*
|
||||
* Check if the subdomain is allowed to be off long enough for
|
||||
* the current domain to turn off and on (that's how much time
|
||||
* it will have to wait worst case).
|
||||
*/
|
||||
if (sd_max_off_ns <= off_on_time_ns)
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the devices in the domain can be off enough time.
|
||||
*/
|
||||
min_dev_off_time_ns = -1;
|
||||
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
|
||||
struct gpd_timing_data *td;
|
||||
struct device *dev = pdd->dev;
|
||||
s64 dev_off_time_ns;
|
||||
|
||||
if (!dev->driver || dev->power.max_time_suspended_ns < 0)
|
||||
continue;
|
||||
|
||||
td = &to_gpd_data(pdd)->td;
|
||||
dev_off_time_ns = dev->power.max_time_suspended_ns -
|
||||
(td->start_latency_ns + td->restore_state_latency_ns +
|
||||
ktime_to_ns(ktime_sub(time_now,
|
||||
dev->power.suspend_time)));
|
||||
if (dev_off_time_ns <= off_on_time_ns)
|
||||
return false;
|
||||
|
||||
if (min_dev_off_time_ns > dev_off_time_ns
|
||||
|| min_dev_off_time_ns < 0)
|
||||
min_dev_off_time_ns = dev_off_time_ns;
|
||||
}
|
||||
|
||||
if (min_dev_off_time_ns < 0) {
|
||||
/*
|
||||
* There are no latency constraints, so the domain can spend
|
||||
* arbitrary time in the "off" state.
|
||||
*/
|
||||
genpd->max_off_time_ns = -1;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* The difference between the computed minimum delta and the time needed
|
||||
* to turn the domain on is the maximum theoretical time this domain can
|
||||
* spend in the "off" state.
|
||||
*/
|
||||
min_dev_off_time_ns -= genpd->power_on_latency_ns;
|
||||
|
||||
/*
|
||||
* If the difference between the computed minimum delta and the time
|
||||
* needed to turn the domain off and back on on is smaller than the
|
||||
* domain's power break even time, removing power from the domain is not
|
||||
* worth it.
|
||||
*/
|
||||
if (genpd->break_even_ns >
|
||||
min_dev_off_time_ns - genpd->power_off_latency_ns)
|
||||
return false;
|
||||
|
||||
genpd->max_off_time_ns = min_dev_off_time_ns;
|
||||
return true;
|
||||
}
|
||||
|
||||
struct dev_power_governor simple_qos_governor = {
	.stop_ok = default_stop_ok,
	.power_down_ok = default_power_down_ok,
};

static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
	return false;
}

/**
 * pm_genpd_gov_always_on - A governor implementing an always-on policy
 */
struct dev_power_governor pm_domain_always_on_gov = {
	.power_down_ok = always_on_power_down_ok,
	.stop_ok = default_stop_ok,
};

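pm_domain_always_on_gov keeps default_stop_ok for per-device stops but vetoes every domain power-off, which is what the shmobile and S3C64XX platforms in this series want for domains that must stay powered. A platform would hand it to pm_genpd_init(); a minimal sketch, assuming the 3.3-era pm_genpd_init() signature and an invented platform domain name:

#include <linux/init.h>
#include <linux/pm_domain.h>

/* Hypothetical platform domain that must stay powered. */
static struct generic_pm_domain my_always_on_domain = {
	.name = "my-always-on",
};

static int __init my_pm_domain_setup(void)
{
	/* third argument: start with the domain powered on */
	pm_genpd_init(&my_always_on_domain, &pm_domain_always_on_gov, false);
	return 0;
}
core_initcall(my_pm_domain_setup);
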
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
 * @event: PM transition of the system under way.
 * @bool: Whether or not this is the "noirq" stage.
 *
 * If the device has not been suspended at run time, execute the
 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
 * return its error code. Otherwise, return zero.
 * Execute the PM callback corresponding to @event provided by the driver of
 * @dev, if defined, and return its error code. Return 0 if the callback is
 * not present.
 */
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int (*callback)(struct device *);

	if (!pm || pm_runtime_suspended(dev))
	if (!pm)
		return 0;

	switch (event) {

@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
	case PM_EVENT_HIBERNATE:
		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
		break;
	case PM_EVENT_RESUME:
		callback = noirq ? pm->resume_noirq : pm->resume;
		break;
	case PM_EVENT_THAW:
		callback = noirq ? pm->thaw_noirq : pm->thaw;
		break;
	case PM_EVENT_RESTORE:
		callback = noirq ? pm->restore_noirq : pm->restore;
		break;
	default:
		callback = NULL;
		break;
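After this change __pm_generic_call() is essentially a table lookup: map the PM event to one slot of dev_pm_ops (noirq or not), then invoke it only if the driver filled that slot in. The shape of that mapping, modelled outside the kernel with a reduced event set (types and names simplified for illustration):

#include <stdio.h>

enum pm_event { EV_SUSPEND, EV_RESUME };

struct device;
typedef int (*pm_cb)(struct device *);

struct dev_ops { pm_cb suspend, suspend_noirq, resume, resume_noirq; };

/* Pick the callback for an event; NULL means "nothing to do" (caller returns 0). */
static pm_cb pick(const struct dev_ops *ops, enum pm_event ev, int noirq)
{
	switch (ev) {
	case EV_SUSPEND: return noirq ? ops->suspend_noirq : ops->suspend;
	case EV_RESUME:  return noirq ? ops->resume_noirq  : ops->resume;
	}
	return NULL;
}

static int call(const struct dev_ops *ops, struct device *dev,
		enum pm_event ev, int noirq)
{
	pm_cb cb = pick(ops, ev, noirq);
	return cb ? cb(dev) : 0;
}

static int drv_resume(struct device *dev) { (void)dev; return 0; }

int main(void)
{
	struct dev_ops ops = { .resume = drv_resume };

	printf("%d\n", call(&ops, NULL, EV_RESUME, 0));  /* runs drv_resume -> 0 */
	printf("%d\n", call(&ops, NULL, EV_SUSPEND, 1)); /* no callback     -> 0 */
	return 0;
}
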
@ -210,57 +216,13 @@ int pm_generic_thaw(struct device *dev)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(pm_generic_thaw);
|
||||
|
||||
/**
|
||||
* __pm_generic_resume - Generic resume/restore callback for subsystems.
|
||||
* @dev: Device to handle.
|
||||
* @event: PM transition of the system under way.
|
||||
* @bool: Whether or not this is the "noirq" stage.
|
||||
*
|
||||
* Execute the resume/resotre callback provided by the @dev's driver, if
|
||||
* defined. If it returns 0, change the device's runtime PM status to 'active'.
|
||||
* Return the callback's error code.
|
||||
*/
|
||||
static int __pm_generic_resume(struct device *dev, int event, bool noirq)
|
||||
{
|
||||
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
||||
int (*callback)(struct device *);
|
||||
int ret;
|
||||
|
||||
if (!pm)
|
||||
return 0;
|
||||
|
||||
switch (event) {
|
||||
case PM_EVENT_RESUME:
|
||||
callback = noirq ? pm->resume_noirq : pm->resume;
|
||||
break;
|
||||
case PM_EVENT_RESTORE:
|
||||
callback = noirq ? pm->restore_noirq : pm->restore;
|
||||
break;
|
||||
default:
|
||||
callback = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!callback)
|
||||
return 0;
|
||||
|
||||
ret = callback(dev);
|
||||
if (!ret && !noirq && pm_runtime_enabled(dev)) {
|
||||
pm_runtime_disable(dev);
|
||||
pm_runtime_set_active(dev);
|
||||
pm_runtime_enable(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
|
||||
* @dev: Device to resume.
|
||||
*/
|
||||
int pm_generic_resume_noirq(struct device *dev)
|
||||
{
|
||||
return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
|
||||
return __pm_generic_call(dev, PM_EVENT_RESUME, true);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
|
||||
|
||||
|
@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
|
|||
*/
|
||||
int pm_generic_resume(struct device *dev)
|
||||
{
|
||||
return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
|
||||
return __pm_generic_call(dev, PM_EVENT_RESUME, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pm_generic_resume);
|
||||
|
||||
|
@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
|
|||
*/
|
||||
int pm_generic_restore_noirq(struct device *dev)
|
||||
{
|
||||
return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
|
||||
return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
|
||||
|
||||
|
@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
|
|||
*/
|
||||
int pm_generic_restore(struct device *dev)
|
||||
{
|
||||
return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
|
||||
return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pm_generic_restore);
|
||||
|
||||
|
@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
|
|||
pm_runtime_idle(dev);
|
||||
}
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
struct dev_pm_ops generic_subsys_pm_ops = {
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.prepare = pm_generic_prepare,
|
||||
.suspend = pm_generic_suspend,
|
||||
.suspend_noirq = pm_generic_suspend_noirq,
|
||||
.resume = pm_generic_resume,
|
||||
.resume_noirq = pm_generic_resume_noirq,
|
||||
.freeze = pm_generic_freeze,
|
||||
.freeze_noirq = pm_generic_freeze_noirq,
|
||||
.thaw = pm_generic_thaw,
|
||||
.thaw_noirq = pm_generic_thaw_noirq,
|
||||
.poweroff = pm_generic_poweroff,
|
||||
.poweroff_noirq = pm_generic_poweroff_noirq,
|
||||
.restore = pm_generic_restore,
|
||||
.restore_noirq = pm_generic_restore_noirq,
|
||||
.complete = pm_generic_complete,
|
||||
#endif
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
.runtime_suspend = pm_generic_runtime_suspend,
|
||||
.runtime_resume = pm_generic_runtime_resume,
|
||||
.runtime_idle = pm_generic_runtime_idle,
|
||||
#endif
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
|
||||
|
|
|
@@ -32,6 +32,8 @@
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and

@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
|
|||
}
|
||||
|
||||
/**
|
||||
* pm_op - Execute the PM operation appropriate for given PM event.
|
||||
* @dev: Device to handle.
|
||||
* pm_op - Return the PM operation appropriate for given PM event.
|
||||
* @ops: PM operations to choose from.
|
||||
* @state: PM transition of the system being carried out.
|
||||
*/
|
||||
static int pm_op(struct device *dev,
|
||||
const struct dev_pm_ops *ops,
|
||||
pm_message_t state)
|
||||
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
|
||||
{
|
||||
int error = 0;
|
||||
ktime_t calltime;
|
||||
|
||||
calltime = initcall_debug_start(dev);
|
||||
|
||||
switch (state.event) {
|
||||
#ifdef CONFIG_SUSPEND
|
||||
case PM_EVENT_SUSPEND:
|
||||
if (ops->suspend) {
|
||||
error = ops->suspend(dev);
|
||||
suspend_report_result(ops->suspend, error);
|
||||
}
|
||||
break;
|
||||
return ops->suspend;
|
||||
case PM_EVENT_RESUME:
|
||||
if (ops->resume) {
|
||||
error = ops->resume(dev);
|
||||
suspend_report_result(ops->resume, error);
|
||||
}
|
||||
break;
|
||||
return ops->resume;
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
||||
case PM_EVENT_FREEZE:
|
||||
case PM_EVENT_QUIESCE:
|
||||
if (ops->freeze) {
|
||||
error = ops->freeze(dev);
|
||||
suspend_report_result(ops->freeze, error);
|
||||
}
|
||||
break;
|
||||
return ops->freeze;
|
||||
case PM_EVENT_HIBERNATE:
|
||||
if (ops->poweroff) {
|
||||
error = ops->poweroff(dev);
|
||||
suspend_report_result(ops->poweroff, error);
|
||||
}
|
||||
break;
|
||||
return ops->poweroff;
|
||||
case PM_EVENT_THAW:
|
||||
case PM_EVENT_RECOVER:
|
||||
if (ops->thaw) {
|
||||
error = ops->thaw(dev);
|
||||
suspend_report_result(ops->thaw, error);
|
||||
}
|
||||
return ops->thaw;
|
||||
break;
|
||||
case PM_EVENT_RESTORE:
|
||||
if (ops->restore) {
|
||||
error = ops->restore(dev);
|
||||
suspend_report_result(ops->restore, error);
|
||||
}
|
||||
break;
|
||||
return ops->restore;
|
||||
#endif /* CONFIG_HIBERNATE_CALLBACKS */
|
||||
default:
|
||||
error = -EINVAL;
|
||||
}
|
||||
|
||||
initcall_debug_report(dev, calltime, error);
|
||||
|
||||
return error;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_noirq_op - Execute the PM operation appropriate for given PM event.
|
||||
* @dev: Device to handle.
|
||||
* pm_noirq_op - Return the PM operation appropriate for given PM event.
|
||||
* @ops: PM operations to choose from.
|
||||
* @state: PM transition of the system being carried out.
|
||||
*
|
||||
* The driver of @dev will not receive interrupts while this function is being
|
||||
* executed.
|
||||
*/
|
||||
static int pm_noirq_op(struct device *dev,
|
||||
const struct dev_pm_ops *ops,
|
||||
pm_message_t state)
|
||||
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
|
||||
{
|
||||
int error = 0;
|
||||
ktime_t calltime = ktime_set(0, 0), delta, rettime;
|
||||
|
||||
if (initcall_debug) {
|
||||
pr_info("calling %s+ @ %i, parent: %s\n",
|
||||
dev_name(dev), task_pid_nr(current),
|
||||
dev->parent ? dev_name(dev->parent) : "none");
|
||||
calltime = ktime_get();
|
||||
}
|
||||
|
||||
switch (state.event) {
|
||||
#ifdef CONFIG_SUSPEND
|
||||
case PM_EVENT_SUSPEND:
|
||||
if (ops->suspend_noirq) {
|
||||
error = ops->suspend_noirq(dev);
|
||||
suspend_report_result(ops->suspend_noirq, error);
|
||||
}
|
||||
break;
|
||||
return ops->suspend_noirq;
|
||||
case PM_EVENT_RESUME:
|
||||
if (ops->resume_noirq) {
|
||||
error = ops->resume_noirq(dev);
|
||||
suspend_report_result(ops->resume_noirq, error);
|
||||
}
|
||||
break;
|
||||
return ops->resume_noirq;
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
||||
case PM_EVENT_FREEZE:
|
||||
case PM_EVENT_QUIESCE:
|
||||
if (ops->freeze_noirq) {
|
||||
error = ops->freeze_noirq(dev);
|
||||
suspend_report_result(ops->freeze_noirq, error);
|
||||
}
|
||||
break;
|
||||
return ops->freeze_noirq;
|
||||
case PM_EVENT_HIBERNATE:
|
||||
if (ops->poweroff_noirq) {
|
||||
error = ops->poweroff_noirq(dev);
|
||||
suspend_report_result(ops->poweroff_noirq, error);
|
||||
}
|
||||
break;
|
||||
return ops->poweroff_noirq;
|
||||
case PM_EVENT_THAW:
|
||||
case PM_EVENT_RECOVER:
|
||||
if (ops->thaw_noirq) {
|
||||
error = ops->thaw_noirq(dev);
|
||||
suspend_report_result(ops->thaw_noirq, error);
|
||||
}
|
||||
break;
|
||||
return ops->thaw_noirq;
|
||||
case PM_EVENT_RESTORE:
|
||||
if (ops->restore_noirq) {
|
||||
error = ops->restore_noirq(dev);
|
||||
suspend_report_result(ops->restore_noirq, error);
|
||||
}
|
||||
break;
|
||||
return ops->restore_noirq;
|
||||
#endif /* CONFIG_HIBERNATE_CALLBACKS */
|
||||
default:
|
||||
error = -EINVAL;
|
||||
}
|
||||
|
||||
if (initcall_debug) {
|
||||
rettime = ktime_get();
|
||||
delta = ktime_sub(rettime, calltime);
|
||||
printk("initcall %s_i+ returned %d after %Ld usecs\n",
|
||||
dev_name(dev), error,
|
||||
(unsigned long long)ktime_to_ns(delta) >> 10);
|
||||
}
|
||||
|
||||
return error;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static char *pm_verb(int event)
|
||||
|
@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
|
|||
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
|
||||
}
|
||||
|
||||
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
|
||||
pm_message_t state, char *info)
|
||||
{
|
||||
ktime_t calltime;
|
||||
int error;
|
||||
|
||||
if (!cb)
|
||||
return 0;
|
||||
|
||||
calltime = initcall_debug_start(dev);
|
||||
|
||||
pm_dev_dbg(dev, state, info);
|
||||
error = cb(dev);
|
||||
suspend_report_result(cb, error);
|
||||
|
||||
initcall_debug_report(dev, calltime, error);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/*------------------------- Resume routines -------------------------*/
|
||||
|
||||
/**
|
||||
|
@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
|
|||
*/
|
||||
static int device_resume_noirq(struct device *dev, pm_message_t state)
|
||||
{
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
|
||||
TRACE_DEVICE(dev);
|
||||
TRACE_RESUME(0);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "EARLY power domain ");
|
||||
error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
|
||||
info = "EARLY power domain ";
|
||||
callback = pm_noirq_op(&dev->pm_domain->ops, state);
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "EARLY type ");
|
||||
error = pm_noirq_op(dev, dev->type->pm, state);
|
||||
info = "EARLY type ";
|
||||
callback = pm_noirq_op(dev->type->pm, state);
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "EARLY class ");
|
||||
error = pm_noirq_op(dev, dev->class->pm, state);
|
||||
info = "EARLY class ";
|
||||
callback = pm_noirq_op(dev->class->pm, state);
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "EARLY ");
|
||||
error = pm_noirq_op(dev, dev->bus->pm, state);
|
||||
info = "EARLY bus ";
|
||||
callback = pm_noirq_op(dev->bus->pm, state);
|
||||
}
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "EARLY driver ";
|
||||
callback = pm_noirq_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
error = dpm_run_callback(callback, dev, state, info);
|
||||
|
||||
TRACE_RESUME(error);
|
||||
return error;
|
||||
}
|
||||
|
@ -485,26 +435,6 @@ void dpm_resume_noirq(pm_message_t state)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
|
||||
|
||||
/**
|
||||
* legacy_resume - Execute a legacy (bus or class) resume callback for device.
|
||||
* @dev: Device to resume.
|
||||
* @cb: Resume callback to execute.
|
||||
*/
|
||||
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
|
||||
{
|
||||
int error;
|
||||
ktime_t calltime;
|
||||
|
||||
calltime = initcall_debug_start(dev);
|
||||
|
||||
error = cb(dev);
|
||||
suspend_report_result(cb, error);
|
||||
|
||||
initcall_debug_report(dev, calltime, error);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* device_resume - Execute "resume" callbacks for given device.
|
||||
* @dev: Device to handle.
|
||||
|
@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
|
|||
*/
|
||||
static int device_resume(struct device *dev, pm_message_t state, bool async)
|
||||
{
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
bool put = false;
|
||||
|
||||
|
@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
|
|||
put = true;
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "power domain ");
|
||||
error = pm_op(dev, &dev->pm_domain->ops, state);
|
||||
goto End;
|
||||
info = "power domain ";
|
||||
callback = pm_op(&dev->pm_domain->ops, state);
|
||||
goto Driver;
|
||||
}
|
||||
|
||||
if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "type ");
|
||||
error = pm_op(dev, dev->type->pm, state);
|
||||
goto End;
|
||||
info = "type ";
|
||||
callback = pm_op(dev->type->pm, state);
|
||||
goto Driver;
|
||||
}
|
||||
|
||||
if (dev->class) {
|
||||
if (dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "class ");
|
||||
error = pm_op(dev, dev->class->pm, state);
|
||||
goto End;
|
||||
info = "class ";
|
||||
callback = pm_op(dev->class->pm, state);
|
||||
goto Driver;
|
||||
} else if (dev->class->resume) {
|
||||
pm_dev_dbg(dev, state, "legacy class ");
|
||||
error = legacy_resume(dev, dev->class->resume);
|
||||
info = "legacy class ";
|
||||
callback = dev->class->resume;
|
||||
goto End;
|
||||
}
|
||||
}
|
||||
|
||||
if (dev->bus) {
|
||||
if (dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "");
|
||||
error = pm_op(dev, dev->bus->pm, state);
|
||||
info = "bus ";
|
||||
callback = pm_op(dev->bus->pm, state);
|
||||
} else if (dev->bus->resume) {
|
||||
pm_dev_dbg(dev, state, "legacy ");
|
||||
error = legacy_resume(dev, dev->bus->resume);
|
||||
info = "legacy bus ";
|
||||
callback = dev->bus->resume;
|
||||
goto End;
|
||||
}
|
||||
}
|
||||
|
||||
Driver:
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "driver ";
|
||||
callback = pm_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
End:
|
||||
error = dpm_run_callback(callback, dev, state, info);
|
||||
dev->power.is_suspended = false;
|
||||
|
||||
Unlock:
|
||||
|
@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
|
|||
*/
|
||||
static void device_complete(struct device *dev, pm_message_t state)
|
||||
{
|
||||
void (*callback)(struct device *) = NULL;
|
||||
char *info = NULL;
|
||||
|
||||
device_lock(dev);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "completing power domain ");
|
||||
if (dev->pm_domain->ops.complete)
|
||||
dev->pm_domain->ops.complete(dev);
|
||||
info = "completing power domain ";
|
||||
callback = dev->pm_domain->ops.complete;
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "completing type ");
|
||||
if (dev->type->pm->complete)
|
||||
dev->type->pm->complete(dev);
|
||||
info = "completing type ";
|
||||
callback = dev->type->pm->complete;
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "completing class ");
|
||||
if (dev->class->pm->complete)
|
||||
dev->class->pm->complete(dev);
|
||||
info = "completing class ";
|
||||
callback = dev->class->pm->complete;
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "completing ");
|
||||
if (dev->bus->pm->complete)
|
||||
dev->bus->pm->complete(dev);
|
||||
info = "completing bus ";
|
||||
callback = dev->bus->pm->complete;
|
||||
}
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "completing driver ";
|
||||
callback = dev->driver->pm->complete;
|
||||
}
|
||||
|
||||
if (callback) {
|
||||
pm_dev_dbg(dev, state, info);
|
||||
callback(dev);
|
||||
}
|
||||
|
||||
device_unlock(dev);
|
||||
|
@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
|
|||
*/
|
||||
static int device_suspend_noirq(struct device *dev, pm_message_t state)
|
||||
{
|
||||
int error;
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "LATE power domain ");
|
||||
error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE power domain ";
|
||||
callback = pm_noirq_op(&dev->pm_domain->ops, state);
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "LATE type ");
|
||||
error = pm_noirq_op(dev, dev->type->pm, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE type ";
|
||||
callback = pm_noirq_op(dev->type->pm, state);
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "LATE class ");
|
||||
error = pm_noirq_op(dev, dev->class->pm, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE class ";
|
||||
callback = pm_noirq_op(dev->class->pm, state);
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "LATE ");
|
||||
error = pm_noirq_op(dev, dev->bus->pm, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE bus ";
|
||||
callback = pm_noirq_op(dev->bus->pm, state);
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "LATE driver ";
|
||||
callback = pm_noirq_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
return dpm_run_callback(callback, dev, state, info);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
|
|||
*/
|
||||
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||||
{
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
|
||||
dpm_wait_for_children(dev, async);
|
||||
|
@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
|||
device_lock(dev);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "power domain ");
|
||||
error = pm_op(dev, &dev->pm_domain->ops, state);
|
||||
goto End;
|
||||
info = "power domain ";
|
||||
callback = pm_op(&dev->pm_domain->ops, state);
|
||||
goto Run;
|
||||
}
|
||||
|
||||
if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "type ");
|
||||
error = pm_op(dev, dev->type->pm, state);
|
||||
goto End;
|
||||
info = "type ";
|
||||
callback = pm_op(dev->type->pm, state);
|
||||
goto Run;
|
||||
}
|
||||
|
||||
if (dev->class) {
|
||||
if (dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "class ");
|
||||
error = pm_op(dev, dev->class->pm, state);
|
||||
goto End;
|
||||
info = "class ";
|
||||
callback = pm_op(dev->class->pm, state);
|
||||
goto Run;
|
||||
} else if (dev->class->suspend) {
|
||||
pm_dev_dbg(dev, state, "legacy class ");
|
||||
error = legacy_suspend(dev, state, dev->class->suspend);
|
||||
|
@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
|||
|
||||
if (dev->bus) {
|
||||
if (dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "");
|
||||
error = pm_op(dev, dev->bus->pm, state);
|
||||
info = "bus ";
|
||||
callback = pm_op(dev->bus->pm, state);
|
||||
} else if (dev->bus->suspend) {
|
||||
pm_dev_dbg(dev, state, "legacy ");
|
||||
pm_dev_dbg(dev, state, "legacy bus ");
|
||||
error = legacy_suspend(dev, state, dev->bus->suspend);
|
||||
goto End;
|
||||
}
|
||||
}
|
||||
|
||||
Run:
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "driver ";
|
||||
callback = pm_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
error = dpm_run_callback(callback, dev, state, info);
|
||||
|
||||
End:
|
||||
if (!error) {
|
||||
dev->power.is_suspended = true;
|
||||
|
@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
|
|||
*/
|
||||
static int device_prepare(struct device *dev, pm_message_t state)
|
||||
{
|
||||
int (*callback)(struct device *) = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
|
||||
device_lock(dev);
|
||||
|
@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
|
|||
dev->power.wakeup_path = device_may_wakeup(dev);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "preparing power domain ");
|
||||
if (dev->pm_domain->ops.prepare)
|
||||
error = dev->pm_domain->ops.prepare(dev);
|
||||
suspend_report_result(dev->pm_domain->ops.prepare, error);
|
||||
if (error)
|
||||
goto End;
|
||||
info = "preparing power domain ";
|
||||
callback = dev->pm_domain->ops.prepare;
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "preparing type ");
|
||||
if (dev->type->pm->prepare)
|
||||
error = dev->type->pm->prepare(dev);
|
||||
suspend_report_result(dev->type->pm->prepare, error);
|
||||
if (error)
|
||||
goto End;
|
||||
info = "preparing type ";
|
||||
callback = dev->type->pm->prepare;
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "preparing class ");
|
||||
if (dev->class->pm->prepare)
|
||||
error = dev->class->pm->prepare(dev);
|
||||
suspend_report_result(dev->class->pm->prepare, error);
|
||||
if (error)
|
||||
goto End;
|
||||
info = "preparing class ";
|
||||
callback = dev->class->pm->prepare;
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "preparing ");
|
||||
if (dev->bus->pm->prepare)
|
||||
error = dev->bus->pm->prepare(dev);
|
||||
suspend_report_result(dev->bus->pm->prepare, error);
|
||||
info = "preparing bus ";
|
||||
callback = dev->bus->pm->prepare;
|
||||
}
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "preparing driver ";
|
||||
callback = dev->driver->pm->prepare;
|
||||
}
|
||||
|
||||
if (callback) {
|
||||
error = callback(dev);
|
||||
suspend_report_result(callback, error);
|
||||
}
|
||||
|
||||
End:
|
||||
device_unlock(dev);
|
||||
|
||||
return error;
|
||||
|
|
|
@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
|
|||
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_read_value - Get PM QoS constraint for a given device.
|
||||
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
|
||||
* @dev: Device to get the PM QoS constraint value for.
|
||||
*
|
||||
* This routine must be called with dev->power.lock held.
|
||||
*/
|
||||
s32 __dev_pm_qos_read_value(struct device *dev)
|
||||
{
|
||||
struct pm_qos_constraints *c = dev->power.constraints;
|
||||
|
||||
return c ? pm_qos_read_value(c) : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
|
||||
* @dev: Device to get the PM QoS constraint value for.
|
||||
*/
|
||||
s32 dev_pm_qos_read_value(struct device *dev)
|
||||
{
|
||||
struct pm_qos_constraints *c;
|
||||
unsigned long flags;
|
||||
s32 ret = 0;
|
||||
s32 ret;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
|
||||
c = dev->power.constraints;
|
||||
if (c)
|
||||
ret = pm_qos_read_value(c);
|
||||
|
||||
ret = __dev_pm_qos_read_value(dev);
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
|
||||
return ret;
|
||||
|
@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
|
|||
return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
|
||||
* @dev: Device whose ancestor to add the request for.
|
||||
* @req: Pointer to the preallocated handle.
|
||||
* @value: Constraint latency value.
|
||||
*/
|
||||
int dev_pm_qos_add_ancestor_request(struct device *dev,
|
||||
struct dev_pm_qos_request *req, s32 value)
|
||||
{
|
||||
struct device *ancestor = dev->parent;
|
||||
int error = -ENODEV;
|
||||
|
||||
while (ancestor && !ancestor->power.ignore_children)
|
||||
ancestor = ancestor->parent;
|
||||
|
||||
if (ancestor)
|
||||
error = dev_pm_qos_add_request(ancestor, req, value);
|
||||
|
||||
if (error)
|
||||
req->dev = NULL;
|
||||
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
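The st1232 touchscreen hunk later in this diff is the first in-tree user of this helper. As a rough, purely illustrative sketch (the names below are made up and not part of this merge), a driver that wants a latency constraint on whichever ancestor actually owns its power could do something like:

#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_req;	/* illustrative only */

static int example_start_io(struct device *dev)
{
	/* Ask the nearest non-ignore_children ancestor for 100 us latency. */
	int error = dev_pm_qos_add_ancestor_request(dev, &example_req, 100);

	return error < 0 ? error : 0;
}

static void example_stop_io(void)
{
	/* req->dev is cleared on failure, so this mirrors the st1232 usage. */
	if (example_req.dev)
		dev_pm_qos_remove_request(&example_req);
}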
|
||||
|
|
|
@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
|
|||
else
|
||||
callback = NULL;
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm)
|
||||
callback = dev->driver->pm->runtime_idle;
|
||||
|
||||
if (callback)
|
||||
__rpm_callback(callback, dev);
|
||||
|
||||
|
@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
|
|||
return retval != -EACCES ? retval : -EIO;
|
||||
}
|
||||
|
||||
struct rpm_qos_data {
|
||||
ktime_t time_now;
|
||||
s64 constraint_ns;
|
||||
};
|
||||
|
||||
/**
|
||||
* rpm_update_qos_constraint - Update a given PM QoS constraint data.
|
||||
* @dev: Device whose timing data to use.
|
||||
* @data: PM QoS constraint data to update.
|
||||
*
|
||||
* Use the suspend timing data of @dev to update PM QoS constraint data pointed
|
||||
* to by @data.
|
||||
*/
|
||||
static int rpm_update_qos_constraint(struct device *dev, void *data)
|
||||
{
|
||||
struct rpm_qos_data *qos = data;
|
||||
unsigned long flags;
|
||||
s64 delta_ns;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
|
||||
if (dev->power.max_time_suspended_ns < 0)
|
||||
goto out;
|
||||
|
||||
delta_ns = dev->power.max_time_suspended_ns -
|
||||
ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
|
||||
if (delta_ns <= 0) {
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
|
||||
qos->constraint_ns = delta_ns;
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
|
||||
return ret;
|
||||
}
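As a purely illustrative walk through the arithmetic above: if a child device was suspended 20 ms ago and its power.max_time_suspended_ns allows at most 50 ms, delta_ns comes out at 30 ms; a caller constraint of 40 ms (or the initial 0, meaning no limit recorded yet) is then tightened to 30 ms, an existing 10 ms constraint is left alone, and a child whose allowance has already expired (delta_ns <= 0) returns -EBUSY, which makes the enclosing rpm_suspend() bail out to its fail path.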
|
||||
|
||||
/**
|
||||
* rpm_suspend - Carry out runtime suspend of given device.
|
||||
* @dev: Device to suspend.
|
||||
|
@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
|
|||
{
|
||||
int (*callback)(struct device *);
|
||||
struct device *parent = NULL;
|
||||
struct rpm_qos_data qos;
|
||||
int retval;
|
||||
|
||||
trace_rpm_suspend(dev, rpmflags);
|
||||
|
@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
|
|||
goto out;
|
||||
}
|
||||
|
||||
qos.constraint_ns = __dev_pm_qos_read_value(dev);
|
||||
if (qos.constraint_ns < 0) {
|
||||
/* Negative constraint means "never suspend". */
|
||||
retval = -EPERM;
|
||||
goto out;
|
||||
}
|
||||
qos.constraint_ns *= NSEC_PER_USEC;
|
||||
qos.time_now = ktime_get();
|
||||
|
||||
__update_runtime_status(dev, RPM_SUSPENDING);
|
||||
|
||||
if (!dev->power.ignore_children) {
|
||||
if (dev->power.irq_safe)
|
||||
spin_unlock(&dev->power.lock);
|
||||
else
|
||||
spin_unlock_irq(&dev->power.lock);
|
||||
|
||||
retval = device_for_each_child(dev, &qos,
|
||||
rpm_update_qos_constraint);
|
||||
|
||||
if (dev->power.irq_safe)
|
||||
spin_lock(&dev->power.lock);
|
||||
else
|
||||
spin_lock_irq(&dev->power.lock);
|
||||
|
||||
if (retval)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
dev->power.suspend_time = qos.time_now;
|
||||
dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
|
||||
|
||||
if (dev->pm_domain)
|
||||
callback = dev->pm_domain->ops.runtime_suspend;
|
||||
else if (dev->type && dev->type->pm)
|
||||
|
@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags)
|
|||
else
|
||||
callback = NULL;
|
||||
|
||||
retval = rpm_callback(callback, dev);
|
||||
if (retval) {
|
||||
__update_runtime_status(dev, RPM_ACTIVE);
|
||||
dev->power.deferred_resume = false;
|
||||
if (retval == -EAGAIN || retval == -EBUSY) {
|
||||
dev->power.runtime_error = 0;
|
||||
if (!callback && dev->driver && dev->driver->pm)
|
||||
callback = dev->driver->pm->runtime_suspend;
|
||||
|
||||
retval = rpm_callback(callback, dev);
|
||||
if (retval)
|
||||
goto fail;
|
||||
|
||||
/*
|
||||
* If the callback routine failed an autosuspend, and
|
||||
* if the last_busy time has been updated so that there
|
||||
* is a new autosuspend expiration time, automatically
|
||||
* reschedule another autosuspend.
|
||||
*/
|
||||
if ((rpmflags & RPM_AUTO) &&
|
||||
pm_runtime_autosuspend_expiration(dev) != 0)
|
||||
goto repeat;
|
||||
} else {
|
||||
pm_runtime_cancel_pending(dev);
|
||||
}
|
||||
wake_up_all(&dev->power.wait_queue);
|
||||
goto out;
|
||||
}
|
||||
no_callback:
|
||||
__update_runtime_status(dev, RPM_SUSPENDED);
|
||||
pm_runtime_deactivate_timer(dev);
|
||||
|
@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
|
|||
trace_rpm_return_int(dev, _THIS_IP_, retval);
|
||||
|
||||
return retval;
|
||||
|
||||
fail:
|
||||
__update_runtime_status(dev, RPM_ACTIVE);
|
||||
dev->power.suspend_time = ktime_set(0, 0);
|
||||
dev->power.max_time_suspended_ns = -1;
|
||||
dev->power.deferred_resume = false;
|
||||
if (retval == -EAGAIN || retval == -EBUSY) {
|
||||
dev->power.runtime_error = 0;
|
||||
|
||||
/*
|
||||
* If the callback routine failed an autosuspend, and
|
||||
* if the last_busy time has been updated so that there
|
||||
* is a new autosuspend expiration time, automatically
|
||||
* reschedule another autosuspend.
|
||||
*/
|
||||
if ((rpmflags & RPM_AUTO) &&
|
||||
pm_runtime_autosuspend_expiration(dev) != 0)
|
||||
goto repeat;
|
||||
} else {
|
||||
pm_runtime_cancel_pending(dev);
|
||||
}
|
||||
wake_up_all(&dev->power.wait_queue);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
|
|||
if (dev->power.no_callbacks)
|
||||
goto no_callback; /* Assume success. */
|
||||
|
||||
dev->power.suspend_time = ktime_set(0, 0);
|
||||
dev->power.max_time_suspended_ns = -1;
|
||||
|
||||
__update_runtime_status(dev, RPM_RESUMING);
|
||||
|
||||
if (dev->pm_domain)
|
||||
|
@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
|
|||
else
|
||||
callback = NULL;
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm)
|
||||
callback = dev->driver->pm->runtime_resume;
|
||||
|
||||
retval = rpm_callback(callback, dev);
|
||||
if (retval) {
|
||||
__update_runtime_status(dev, RPM_SUSPENDED);
|
||||
|
@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev)
|
|||
setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
|
||||
(unsigned long)dev);
|
||||
|
||||
dev->power.suspend_time = ktime_set(0, 0);
|
||||
dev->power.max_time_suspended_ns = -1;
|
||||
|
||||
init_waitqueue_head(&dev->power.wait_queue);
|
||||
}
|
||||
|
||||
|
@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev)
|
|||
if (dev->power.irq_safe && dev->parent)
|
||||
pm_runtime_put_sync(dev->parent);
|
||||
}
|
||||
|
||||
/**
|
||||
* pm_runtime_update_max_time_suspended - Update device's suspend time data.
|
||||
* @dev: Device to handle.
|
||||
* @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
|
||||
*
|
||||
* Update the device's power.max_time_suspended_ns field by subtracting
|
||||
* @delta_ns from it. The resulting value of power.max_time_suspended_ns is
|
||||
* never negative.
|
||||
*/
|
||||
void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
|
||||
if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
|
||||
if (dev->power.max_time_suspended_ns > delta_ns)
|
||||
dev->power.max_time_suspended_ns -= delta_ns;
|
||||
else
|
||||
dev->power.max_time_suspended_ns = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
}
|
||||
|
|
|
@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
|
|||
|
||||
init_waitqueue_entry(&wait, current);
|
||||
|
||||
current->flags |= PF_NOFREEZE;
|
||||
|
||||
for (;;) {
|
||||
add_wait_queue(&thread->wait_q, &wait);
|
||||
|
||||
|
|
|
@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
|
|||
|
||||
comment "DEVFREQ Drivers"
|
||||
|
||||
config ARM_EXYNOS4_BUS_DEVFREQ
|
||||
bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
|
||||
depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
|
||||
select ARCH_HAS_OPP
|
||||
select DEVFREQ_GOV_SIMPLE_ONDEMAND
|
||||
help
|
||||
This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
|
||||
and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
|
||||
It reads PPMU counters of memory controllers and adjusts
|
||||
the operating frequencies and voltages with OPP support.
|
||||
To operate with optimal voltages, ASV support is required
|
||||
(CONFIG_EXYNOS_ASV).
|
||||
|
||||
endif # PM_DEVFREQ
|
||||
|
|
|
@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
|
|||
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
|
||||
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
|
||||
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
|
||||
|
||||
# DEVFREQ Drivers
|
||||
obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
|
||||
|
|
|
@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
|
|||
if (!IS_ERR(devfreq)) {
|
||||
dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
|
|||
dev_err(dev, "%s: Unable to create devfreq for the device\n",
|
||||
__func__);
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
mutex_init(&devfreq->lock);
|
||||
|
@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
|
|||
devfreq->next_polling);
|
||||
}
|
||||
mutex_unlock(&devfreq_list_lock);
|
||||
goto out;
|
||||
out:
|
||||
return devfreq;
|
||||
|
||||
err_init:
|
||||
device_unregister(&devfreq->dev);
|
||||
err_dev:
|
||||
mutex_unlock(&devfreq->lock);
|
||||
kfree(devfreq);
|
||||
out:
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
else
|
||||
return devfreq;
|
||||
err_out:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
drivers/devfreq/exynos4_bus.c (new file, 1135 lines added; diff suppressed because it is too large)
|
@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
|
|||
return error_count;
|
||||
}
|
||||
|
||||
static void dmatest_callback(void *completion)
|
||||
/* poor man's completion - we want to use wait_event_freezable() on it */
|
||||
struct dmatest_done {
|
||||
bool done;
|
||||
wait_queue_head_t *wait;
|
||||
};
|
||||
|
||||
static void dmatest_callback(void *arg)
|
||||
{
|
||||
complete(completion);
|
||||
struct dmatest_done *done = arg;
|
||||
|
||||
done->done = true;
|
||||
wake_up_all(done->wait);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
|
|||
*/
|
||||
static int dmatest_func(void *data)
|
||||
{
|
||||
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
|
||||
struct dmatest_thread *thread = data;
|
||||
struct dmatest_done done = { .wait = &done_wait };
|
||||
struct dma_chan *chan;
|
||||
const char *thread_name;
|
||||
unsigned int src_off, dst_off, len;
|
||||
|
@ -252,7 +263,7 @@ static int dmatest_func(void *data)
|
|||
int i;
|
||||
|
||||
thread_name = current->comm;
|
||||
set_freezable_with_signal();
|
||||
set_freezable();
|
||||
|
||||
ret = -ENOMEM;
|
||||
|
||||
|
@ -306,9 +317,6 @@ static int dmatest_func(void *data)
|
|||
struct dma_async_tx_descriptor *tx = NULL;
|
||||
dma_addr_t dma_srcs[src_cnt];
|
||||
dma_addr_t dma_dsts[dst_cnt];
|
||||
struct completion cmp;
|
||||
unsigned long start, tmo, end = 0 /* compiler... */;
|
||||
bool reload = true;
|
||||
u8 align = 0;
|
||||
|
||||
total_tests++;
|
||||
|
@ -391,9 +399,9 @@ static int dmatest_func(void *data)
|
|||
continue;
|
||||
}
|
||||
|
||||
init_completion(&cmp);
|
||||
done.done = false;
|
||||
tx->callback = dmatest_callback;
|
||||
tx->callback_param = &cmp;
|
||||
tx->callback_param = &done;
|
||||
cookie = tx->tx_submit(tx);
|
||||
|
||||
if (dma_submit_error(cookie)) {
|
||||
|
@ -407,20 +415,20 @@ static int dmatest_func(void *data)
|
|||
}
|
||||
dma_async_issue_pending(chan);
|
||||
|
||||
do {
|
||||
start = jiffies;
|
||||
if (reload)
|
||||
end = start + msecs_to_jiffies(timeout);
|
||||
else if (end <= start)
|
||||
end = start + 1;
|
||||
tmo = wait_for_completion_interruptible_timeout(&cmp,
|
||||
end - start);
|
||||
reload = try_to_freeze();
|
||||
} while (tmo == -ERESTARTSYS);
|
||||
wait_event_freezable_timeout(done_wait, done.done,
|
||||
msecs_to_jiffies(timeout));
|
||||
|
||||
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
|
||||
|
||||
if (tmo == 0) {
|
||||
if (!done.done) {
|
||||
/*
|
||||
* We're leaving the timed out dma operation with
|
||||
* dangling pointer to done_wait. To make this
|
||||
* correct, we'll need to allocate wait_done for
|
||||
* each test iteration and perform "who's gonna
|
||||
* free it this time?" dancing. For now, just
|
||||
* leave it dangling.
|
||||
*/
|
||||
pr_warning("%s: #%u: test timed out\n",
|
||||
thread_name, total_tests - 1);
|
||||
failed_tests++;
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <linux/input.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
|
@ -46,6 +47,7 @@ struct st1232_ts_data {
|
|||
struct i2c_client *client;
|
||||
struct input_dev *input_dev;
|
||||
struct st1232_ts_finger finger[MAX_FINGERS];
|
||||
struct dev_pm_qos_request low_latency_req;
|
||||
};
|
||||
|
||||
static int st1232_ts_read_data(struct st1232_ts_data *ts)
|
||||
|
@ -118,8 +120,17 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
/* SYN_MT_REPORT only if no contact */
|
||||
if (!count)
|
||||
if (!count) {
|
||||
input_mt_sync(input_dev);
|
||||
if (ts->low_latency_req.dev) {
|
||||
dev_pm_qos_remove_request(&ts->low_latency_req);
|
||||
ts->low_latency_req.dev = NULL;
|
||||
}
|
||||
} else if (!ts->low_latency_req.dev) {
|
||||
/* First contact, request 100 us latency. */
|
||||
dev_pm_qos_add_ancestor_request(&ts->client->dev,
|
||||
&ts->low_latency_req, 100);
|
||||
}
|
||||
|
||||
/* SYN_REPORT */
|
||||
input_sync(input_dev);
|
||||
|
|
|
@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
|
|||
static const unsigned max_i2c_errors = 100;
|
||||
int ret;
|
||||
|
||||
current->flags |= PF_NOFREEZE;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
int i;
|
||||
union {
|
||||
|
|
|
@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
|
|||
|
||||
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
|
||||
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
|
||||
if (change_speed(stir, stir->speed))
|
||||
break;
|
||||
|
|
|
@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
|
|||
u32 poll_mask, event_mask;
|
||||
unsigned int si, so;
|
||||
unsigned long t;
|
||||
unsigned int change_detector, must_reset;
|
||||
unsigned int change_detector;
|
||||
unsigned int poll_freq;
|
||||
bool was_frozen;
|
||||
|
||||
mutex_lock(&hotkey_thread_mutex);
|
||||
|
||||
|
@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
|
|||
t = 100; /* should never happen... */
|
||||
}
|
||||
t = msleep_interruptible(t);
|
||||
if (unlikely(kthread_should_stop()))
|
||||
if (unlikely(kthread_freezable_should_stop(&was_frozen)))
|
||||
break;
|
||||
must_reset = try_to_freeze();
|
||||
if (t > 0 && !must_reset)
|
||||
|
||||
if (t > 0 && !was_frozen)
|
||||
continue;
|
||||
|
||||
mutex_lock(&hotkey_thread_data_mutex);
|
||||
if (must_reset || hotkey_config_change != change_detector) {
|
||||
if (was_frozen || hotkey_config_change != change_detector) {
|
||||
/* forget old state on thaw or config change */
|
||||
si = so;
|
||||
t = 0;
|
||||
|
@ -2528,10 +2529,6 @@ static int hotkey_kthread(void *data)
|
|||
static void hotkey_poll_stop_sync(void)
|
||||
{
|
||||
if (tpacpi_hotkey_task) {
|
||||
if (frozen(tpacpi_hotkey_task) ||
|
||||
freezing(tpacpi_hotkey_task))
|
||||
thaw_process(tpacpi_hotkey_task);
|
||||
|
||||
kthread_stop(tpacpi_hotkey_task);
|
||||
tpacpi_hotkey_task = NULL;
|
||||
mutex_lock(&hotkey_thread_mutex);
|
||||
|
|
|
@ -354,6 +354,8 @@ int __init register_intc_controller(struct intc_desc *desc)
|
|||
if (desc->force_enable)
|
||||
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
|
||||
|
||||
d->skip_suspend = desc->skip_syscore_suspend;
|
||||
|
||||
nr_intc_controllers++;
|
||||
|
||||
return 0;
|
||||
|
@ -386,6 +388,9 @@ static int intc_suspend(void)
|
|||
list_for_each_entry(d, &intc_list, list) {
|
||||
int irq;
|
||||
|
||||
if (d->skip_suspend)
|
||||
continue;
|
||||
|
||||
/* enable wakeup irqs belonging to this intc controller */
|
||||
for_each_active_irq(irq) {
|
||||
struct irq_data *data;
|
||||
|
@ -409,6 +414,9 @@ static void intc_resume(void)
|
|||
list_for_each_entry(d, &intc_list, list) {
|
||||
int irq;
|
||||
|
||||
if (d->skip_suspend)
|
||||
continue;
|
||||
|
||||
for_each_active_irq(irq) {
|
||||
struct irq_data *data;
|
||||
struct irq_chip *chip;
|
||||
|
|
|
@ -67,6 +67,7 @@ struct intc_desc_int {
|
|||
struct intc_window *window;
|
||||
unsigned int nr_windows;
|
||||
struct irq_chip chip;
|
||||
bool skip_suspend;
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
|
|||
struct rtsx_chip *chip = dev->chip;
|
||||
struct Scsi_Host *host = rtsx_to_host(dev);
|
||||
|
||||
current->flags |= PF_NOFREEZE;
|
||||
|
||||
for (;;) {
|
||||
if (wait_for_completion_interruptible(&dev->cmnd_ready))
|
||||
break;
|
||||
|
|
|
@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
|
|||
|
||||
dev_dbg(dev, "device found\n");
|
||||
|
||||
set_freezable_with_signal();
|
||||
set_freezable();
|
||||
|
||||
/*
|
||||
* Wait for the timeout to expire or for a disconnect
|
||||
*
|
||||
|
@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
|
|||
* fail to freeze, but we can't be non-freezable either. Nor can
|
||||
* khubd freeze while waiting for scanning to complete as it may
|
||||
* hold the device lock, causing a hang when suspending devices.
|
||||
* So we request a fake signal when freezing and use
|
||||
* interruptible sleep to kick us out of our wait early when
|
||||
* freezing happens.
|
||||
* So instead of using wait_event_freezable(), explicitly test
|
||||
* for (DONT_SCAN || freezing) in interruptible wait and proceed
|
||||
* if any of DONT_SCAN, freezing or timeout has happened.
|
||||
*/
|
||||
if (delay_use > 0) {
|
||||
dev_dbg(dev, "waiting for device to settle "
|
||||
"before scanning\n");
|
||||
wait_event_interruptible_timeout(us->delay_wait,
|
||||
test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
|
||||
delay_use * HZ);
|
||||
test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
|
||||
freezing(current), delay_use * HZ);
|
||||
}
|
||||
|
||||
/* If the device is still connected, perform the scanning */
|
||||
|
|
|
@ -334,7 +334,7 @@ static int worker_loop(void *arg)
|
|||
if (freezing(current)) {
|
||||
worker->working = 0;
|
||||
spin_unlock_irq(&worker->lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
spin_unlock_irq(&worker->lock);
|
||||
if (!kthread_should_stop()) {
|
||||
|
|
|
@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
|
|||
btrfs_run_defrag_inodes(root->fs_info);
|
||||
}
|
||||
|
||||
if (freezing(current)) {
|
||||
refrigerator();
|
||||
} else {
|
||||
if (!try_to_freeze()) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (!kthread_should_stop())
|
||||
schedule();
|
||||
|
@ -1635,9 +1633,7 @@ static int transaction_kthread(void *arg)
|
|||
wake_up_process(root->fs_info->cleaner_kthread);
|
||||
mutex_unlock(&root->fs_info->transaction_kthread_mutex);
|
||||
|
||||
if (freezing(current)) {
|
||||
refrigerator();
|
||||
} else {
|
||||
if (!try_to_freeze()) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (!kthread_should_stop() &&
|
||||
!btrfs_transaction_blocked(root->fs_info))
|
||||
|
|
|
@ -2882,8 +2882,7 @@ static int ext4_lazyinit_thread(void *arg)
|
|||
}
|
||||
mutex_unlock(&eli->li_list_mtx);
|
||||
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
|
||||
cur = jiffies;
|
||||
if ((time_after_eq(cur, next_wakeup)) ||
|
||||
|
|
|
@ -936,7 +936,7 @@ int bdi_writeback_thread(void *data)
|
|||
|
||||
trace_writeback_thread_start(bdi);
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
while (!kthread_freezable_should_stop(NULL)) {
|
||||
/*
|
||||
* Remove own delayed wake-up timer, since we are already awake
|
||||
* and we'll take care of the periodic write-back.
|
||||
|
@ -966,8 +966,6 @@ int bdi_writeback_thread(void *data)
|
|||
*/
|
||||
schedule();
|
||||
}
|
||||
|
||||
try_to_freeze();
|
||||
}
|
||||
|
||||
/* Flush any work that raced with us exiting */
|
||||
|
|
|
@ -951,8 +951,8 @@ int gfs2_logd(void *data)
|
|||
wake_up(&sdp->sd_log_waitq);
|
||||
|
||||
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
|
||||
try_to_freeze();
|
||||
|
||||
do {
|
||||
prepare_to_wait(&sdp->sd_logd_waitq, &wait,
|
||||
|
|
|
@ -1417,8 +1417,8 @@ int gfs2_quotad(void *data)
|
|||
/* Check for & recover partially truncated inodes */
|
||||
quotad_check_trunc_list(sdp);
|
||||
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
|
||||
t = min(quotad_timeo, statfs_timeo);
|
||||
|
||||
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
|
||||
|
|
|
@ -166,7 +166,7 @@ static int kjournald(void *arg)
|
|||
*/
|
||||
jbd_debug(1, "Now suspending kjournald\n");
|
||||
spin_unlock(&journal->j_state_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
spin_lock(&journal->j_state_lock);
|
||||
} else {
|
||||
/*
|
||||
|
|
|
@ -173,7 +173,7 @@ static int kjournald2(void *arg)
|
|||
*/
|
||||
jbd_debug(1, "Now suspending kjournald2\n");
|
||||
write_unlock(&journal->j_state_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
write_lock(&journal->j_state_lock);
|
||||
} else {
|
||||
/*
|
||||
|
|
|
@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
|
|||
|
||||
if (freezing(current)) {
|
||||
spin_unlock_irq(&log_redrive_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_irq(&log_redrive_lock);
|
||||
|
|
|
@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
|
|||
|
||||
if (freezing(current)) {
|
||||
LAZY_UNLOCK(flags);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
DECLARE_WAITQUEUE(wq, current);
|
||||
|
||||
|
@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
|
|||
|
||||
if (freezing(current)) {
|
||||
TXN_UNLOCK();
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
TXN_UNLOCK();
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include <linux/nfs_xdr.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)
|
|||
{
|
||||
if (fatal_signal_pending(current))
|
||||
return -ERESTARTSYS;
|
||||
schedule();
|
||||
freezable_schedule();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/nfs_page.h>
|
||||
#include <linux/lockd/bind.h>
|
||||
#include <linux/nfs_mount.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "iostat.h"
|
||||
#include "internal.h"
|
||||
|
@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
|
|||
res = rpc_call_sync(clnt, msg, flags);
|
||||
if (res != -EJUKEBOX && res != -EKEYEXPIRED)
|
||||
break;
|
||||
schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
res = -ERESTARTSYS;
|
||||
} while (!fatal_signal_pending(current));
|
||||
return res;
|
||||
|
|
|
@ -55,6 +55,7 @@
|
|||
#include <linux/sunrpc/bc_xprt.h>
|
||||
#include <linux/xattr.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "nfs4_fs.h"
|
||||
#include "delegation.h"
|
||||
|
@ -243,7 +244,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
|
|||
*timeout = NFS4_POLL_RETRY_MIN;
|
||||
if (*timeout > NFS4_POLL_RETRY_MAX)
|
||||
*timeout = NFS4_POLL_RETRY_MAX;
|
||||
schedule_timeout_killable(*timeout);
|
||||
freezable_schedule_timeout_killable(*timeout);
|
||||
if (fatal_signal_pending(current))
|
||||
res = -ERESTARTSYS;
|
||||
*timeout <<= 1;
|
||||
|
@ -3958,7 +3959,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
|
|||
static unsigned long
|
||||
nfs4_set_lock_task_retry(unsigned long timeout)
|
||||
{
|
||||
schedule_timeout_killable(timeout);
|
||||
freezable_schedule_timeout_killable(timeout);
|
||||
timeout <<= 1;
|
||||
if (timeout > NFS4_LOCK_MAXTIMEOUT)
|
||||
return NFS4_LOCK_MAXTIMEOUT;
|
||||
|
|
|
@ -41,6 +41,7 @@
|
|||
#include <linux/nfs_fs.h>
|
||||
#include <linux/nfs_page.h>
|
||||
#include <linux/lockd/bind.h>
|
||||
#include <linux/freezer.h>
|
||||
#include "internal.h"
|
||||
|
||||
#define NFSDBG_FACILITY NFSDBG_PROC
|
||||
|
@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
|
|||
res = rpc_call_sync(clnt, msg, flags);
|
||||
if (res != -EKEYEXPIRED)
|
||||
break;
|
||||
schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
res = -ERESTARTSYS;
|
||||
} while (!fatal_signal_pending(current));
|
||||
return res;
|
||||
|
|
|
@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
|
|||
|
||||
if (freezing(current)) {
|
||||
spin_unlock(&sci->sc_state_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
spin_lock(&sci->sc_state_lock);
|
||||
} else {
|
||||
DEFINE_WAIT(wait);
|
||||
|
|
|
@ -1702,7 +1702,7 @@ xfsbufd(
|
|||
struct blk_plug plug;
|
||||
|
||||
if (unlikely(freezing(current)))
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
|
||||
/* sleep for a long time if there is nothing to do. */
|
||||
if (list_empty(&target->bt_delwri_queue))
|
||||
|
|
|
@ -5,71 +5,58 @@
|
|||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/atomic.h>
|
||||
|
||||
#ifdef CONFIG_FREEZER
|
||||
extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
|
||||
extern bool pm_freezing; /* PM freezing in effect */
|
||||
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
|
||||
|
||||
/*
|
||||
* Check if a process has been frozen
|
||||
*/
|
||||
static inline int frozen(struct task_struct *p)
|
||||
static inline bool frozen(struct task_struct *p)
|
||||
{
|
||||
return p->flags & PF_FROZEN;
|
||||
}
|
||||
|
||||
extern bool freezing_slow_path(struct task_struct *p);
|
||||
|
||||
/*
|
||||
* Check if there is a request to freeze a process
|
||||
*/
|
||||
static inline int freezing(struct task_struct *p)
|
||||
static inline bool freezing(struct task_struct *p)
|
||||
{
|
||||
return test_tsk_thread_flag(p, TIF_FREEZE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Request that a process be frozen
|
||||
*/
|
||||
static inline void set_freeze_flag(struct task_struct *p)
|
||||
{
|
||||
set_tsk_thread_flag(p, TIF_FREEZE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sometimes we may need to cancel the previous 'freeze' request
|
||||
*/
|
||||
static inline void clear_freeze_flag(struct task_struct *p)
|
||||
{
|
||||
clear_tsk_thread_flag(p, TIF_FREEZE);
|
||||
}
|
||||
|
||||
static inline bool should_send_signal(struct task_struct *p)
|
||||
{
|
||||
return !(p->flags & PF_FREEZER_NOSIG);
|
||||
if (likely(!atomic_read(&system_freezing_cnt)))
|
||||
return false;
|
||||
return freezing_slow_path(p);
|
||||
}
|
||||
|
||||
/* Takes and releases task alloc lock using task_lock() */
|
||||
extern int thaw_process(struct task_struct *p);
|
||||
extern void __thaw_task(struct task_struct *t);
|
||||
|
||||
extern void refrigerator(void);
|
||||
extern bool __refrigerator(bool check_kthr_stop);
|
||||
extern int freeze_processes(void);
|
||||
extern int freeze_kernel_threads(void);
|
||||
extern void thaw_processes(void);
|
||||
|
||||
static inline int try_to_freeze(void)
|
||||
static inline bool try_to_freeze(void)
|
||||
{
|
||||
if (freezing(current)) {
|
||||
refrigerator();
|
||||
return 1;
|
||||
} else
|
||||
return 0;
|
||||
might_sleep();
|
||||
if (likely(!freezing(current)))
|
||||
return false;
|
||||
return __refrigerator(false);
|
||||
}
|
||||
|
||||
extern bool freeze_task(struct task_struct *p, bool sig_only);
|
||||
extern void cancel_freezing(struct task_struct *p);
|
||||
extern bool freeze_task(struct task_struct *p);
|
||||
extern bool set_freezable(void);
|
||||
|
||||
#ifdef CONFIG_CGROUP_FREEZER
|
||||
extern int cgroup_freezing_or_frozen(struct task_struct *task);
|
||||
extern bool cgroup_freezing(struct task_struct *task);
|
||||
#else /* !CONFIG_CGROUP_FREEZER */
|
||||
static inline int cgroup_freezing_or_frozen(struct task_struct *task)
|
||||
static inline bool cgroup_freezing(struct task_struct *task)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
#endif /* !CONFIG_CGROUP_FREEZER */
|
||||
|
||||
|
@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
|
|||
* appropriately in case the child has exited before the freezing of tasks is
|
||||
* complete. However, we don't want kernel threads to be frozen in unexpected
|
||||
* places, so we allow them to block freeze_processes() instead or to set
|
||||
* PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
|
||||
* parents. Fortunately, in the ____call_usermodehelper() case the parent won't
|
||||
* really block freeze_processes(), since ____call_usermodehelper() (the child)
|
||||
* does a little before exec/exit and it can't be frozen before waking up the
|
||||
* parent.
|
||||
* PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
|
||||
* parent won't really block freeze_processes(), since ____call_usermodehelper()
|
||||
* (the child) does a little before exec/exit and it can't be frozen before
|
||||
* waking up the parent.
|
||||
*/
|
||||
|
||||
/*
|
||||
* If the current task is a user space one, tell the freezer not to count it as
|
||||
* freezable.
|
||||
*/
|
||||
|
||||
/* Tell the freezer not to count the current task as freezable. */
|
||||
static inline void freezer_do_not_count(void)
|
||||
{
|
||||
if (current->mm)
|
||||
current->flags |= PF_FREEZER_SKIP;
|
||||
current->flags |= PF_FREEZER_SKIP;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the current task is a user space one, tell the freezer to count it as
|
||||
* freezable again and try to freeze it.
|
||||
* Tell the freezer to count the current task as freezable again and try to
|
||||
* freeze it.
|
||||
*/
|
||||
static inline void freezer_count(void)
|
||||
{
|
||||
if (current->mm) {
|
||||
current->flags &= ~PF_FREEZER_SKIP;
|
||||
try_to_freeze();
|
||||
}
|
||||
current->flags &= ~PF_FREEZER_SKIP;
|
||||
try_to_freeze();
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p)
|
|||
}
|
||||
|
||||
/*
|
||||
* Tell the freezer that the current task should be frozen by it
|
||||
* These macros are intended to be used whenever you want to allow a task that's
|
||||
* sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
|
||||
* that neither returns any clear indication of whether a freeze event happened
|
||||
* while in this function.
|
||||
*/
|
||||
static inline void set_freezable(void)
|
||||
{
|
||||
current->flags &= ~PF_NOFREEZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell the freezer that the current task should be frozen by it and that it
|
||||
* should send a fake signal to the task to freeze it.
|
||||
*/
|
||||
static inline void set_freezable_with_signal(void)
|
||||
{
|
||||
current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
|
||||
}
|
||||
/* Like schedule(), but should not block the freezer. */
|
||||
#define freezable_schedule() \
|
||||
({ \
|
||||
freezer_do_not_count(); \
|
||||
schedule(); \
|
||||
freezer_count(); \
|
||||
})
|
||||
|
||||
/* Like schedule_timeout_killable(), but should not block the freezer. */
|
||||
#define freezable_schedule_timeout_killable(timeout) \
|
||||
({ \
|
||||
long __retval; \
|
||||
freezer_do_not_count(); \
|
||||
__retval = schedule_timeout_killable(timeout); \
|
||||
freezer_count(); \
|
||||
__retval; \
|
||||
})
|
||||
|
||||
/*
|
||||
* Freezer-friendly wrappers around wait_event_interruptible(),
|
||||
|
@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void)
|
|||
#define wait_event_freezable(wq, condition) \
|
||||
({ \
|
||||
int __retval; \
|
||||
do { \
|
||||
for (;;) { \
|
||||
__retval = wait_event_interruptible(wq, \
|
||||
(condition) || freezing(current)); \
|
||||
if (__retval && !freezing(current)) \
|
||||
if (__retval || (condition)) \
|
||||
break; \
|
||||
else if (!(condition)) \
|
||||
__retval = -ERESTARTSYS; \
|
||||
} while (try_to_freeze()); \
|
||||
try_to_freeze(); \
|
||||
} \
|
||||
__retval; \
|
||||
})
|
||||
|
||||
|
||||
#define wait_event_freezable_timeout(wq, condition, timeout) \
|
||||
({ \
|
||||
long __retval = timeout; \
|
||||
do { \
|
||||
for (;;) { \
|
||||
__retval = wait_event_interruptible_timeout(wq, \
|
||||
(condition) || freezing(current), \
|
||||
__retval); \
|
||||
} while (try_to_freeze()); \
|
||||
if (__retval <= 0 || (condition)) \
|
||||
break; \
|
||||
try_to_freeze(); \
|
||||
} \
|
||||
__retval; \
|
||||
})
|
||||
#else /* !CONFIG_FREEZER */
|
||||
static inline int frozen(struct task_struct *p) { return 0; }
|
||||
static inline int freezing(struct task_struct *p) { return 0; }
|
||||
static inline void set_freeze_flag(struct task_struct *p) {}
|
||||
static inline void clear_freeze_flag(struct task_struct *p) {}
|
||||
static inline int thaw_process(struct task_struct *p) { return 1; }
|
||||
|
||||
static inline void refrigerator(void) {}
|
||||
#else /* !CONFIG_FREEZER */
|
||||
static inline bool frozen(struct task_struct *p) { return false; }
|
||||
static inline bool freezing(struct task_struct *p) { return false; }
|
||||
static inline void __thaw_task(struct task_struct *t) {}
|
||||
|
||||
static inline bool __refrigerator(bool check_kthr_stop) { return false; }
|
||||
static inline int freeze_processes(void) { return -ENOSYS; }
|
||||
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
|
||||
static inline void thaw_processes(void) {}
|
||||
|
||||
static inline int try_to_freeze(void) { return 0; }
|
||||
static inline bool try_to_freeze(void) { return false; }
|
||||
|
||||
static inline void freezer_do_not_count(void) {}
|
||||
static inline void freezer_count(void) {}
|
||||
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
|
||||
static inline void set_freezable(void) {}
|
||||
static inline void set_freezable_with_signal(void) {}
|
||||
|
||||
#define freezable_schedule() schedule()
|
||||
|
||||
#define freezable_schedule_timeout_killable(timeout) \
|
||||
schedule_timeout_killable(timeout)
|
||||
|
||||
#define wait_event_freezable(wq, condition) \
|
||||
wait_event_interruptible(wq, condition)
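Taken together, these freezer changes replace the old refrigerator()/set_freezable_with_signal() idioms with set_freezable(), try_to_freeze(), kthread_freezable_should_stop() and the freezable_schedule*() helpers that the driver and filesystem hunks elsewhere in this diff are converted to. A minimal, purely illustrative kthread loop written against that API might look like the sketch below; do_pending_work() and reset_state() are hypothetical stand-ins for whatever the thread actually does.

#include <linux/freezer.h>
#include <linux/kthread.h>

static void reset_state(void *data);		/* hypothetical helper */
static void do_pending_work(void *data);	/* hypothetical helper */

static int example_kthread(void *data)
{
	bool was_frozen;

	set_freezable();	/* clear PF_NOFREEZE so the freezer counts us */

	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			reset_state(data);	/* state may be stale after a thaw */

		do_pending_work(data);

		/* Sleep without blocking a system-wide freeze. */
		freezable_schedule_timeout_killable(HZ);
	}
	return 0;
}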
|
||||
|
|
|
@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
|
|||
extern int usermodehelper_disable(void);
|
||||
extern void usermodehelper_enable(void);
|
||||
extern bool usermodehelper_is_disabled(void);
|
||||
extern void read_lock_usermodehelper(void);
|
||||
extern void read_unlock_usermodehelper(void);
|
||||
|
||||
#endif /* __LINUX_KMOD_H__ */
|
||||
|
|
|
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void kthread_bind(struct task_struct *k, unsigned int cpu);
int kthread_stop(struct task_struct *k);
int kthread_should_stop(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);

int kthreadd(void *unused);

@@ -256,62 +256,34 @@ static inline char *early_platform_driver_setup_func(void) \
}
#endif /* MODULE */

#ifdef CONFIG_PM_SLEEP
extern int platform_pm_prepare(struct device *dev);
extern void platform_pm_complete(struct device *dev);
#else
#define platform_pm_prepare NULL
#define platform_pm_complete NULL
#endif

#ifdef CONFIG_SUSPEND
extern int platform_pm_suspend(struct device *dev);
extern int platform_pm_suspend_noirq(struct device *dev);
extern int platform_pm_resume(struct device *dev);
extern int platform_pm_resume_noirq(struct device *dev);
#else
#define platform_pm_suspend NULL
#define platform_pm_resume NULL
#define platform_pm_suspend_noirq NULL
#define platform_pm_resume_noirq NULL
#endif

#ifdef CONFIG_HIBERNATE_CALLBACKS
extern int platform_pm_freeze(struct device *dev);
extern int platform_pm_freeze_noirq(struct device *dev);
extern int platform_pm_thaw(struct device *dev);
extern int platform_pm_thaw_noirq(struct device *dev);
extern int platform_pm_poweroff(struct device *dev);
extern int platform_pm_poweroff_noirq(struct device *dev);
extern int platform_pm_restore(struct device *dev);
extern int platform_pm_restore_noirq(struct device *dev);
#else
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
#define platform_pm_poweroff NULL
#define platform_pm_restore NULL
#define platform_pm_freeze_noirq NULL
#define platform_pm_thaw_noirq NULL
#define platform_pm_poweroff_noirq NULL
#define platform_pm_restore_noirq NULL
#endif

#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
	.prepare = platform_pm_prepare, \
	.complete = platform_pm_complete, \
	.suspend = platform_pm_suspend, \
	.resume = platform_pm_resume, \
	.freeze = platform_pm_freeze, \
	.thaw = platform_pm_thaw, \
	.poweroff = platform_pm_poweroff, \
	.restore = platform_pm_restore, \
	.suspend_noirq = platform_pm_suspend_noirq, \
	.resume_noirq = platform_pm_resume_noirq, \
	.freeze_noirq = platform_pm_freeze_noirq, \
	.thaw_noirq = platform_pm_thaw_noirq, \
	.poweroff_noirq = platform_pm_poweroff_noirq, \
	.restore_noirq = platform_pm_restore_noirq,
	.restore = platform_pm_restore,
#else
#define USE_PLATFORM_PM_SLEEP_OPS
#endif

@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}

/*
 * Use this for subsystems (bus types, device types, device classes) that don't
 * need any special suspend/resume handling in addition to invoking the PM
 * callbacks provided by device drivers supporting both the system sleep PM and
 * runtime PM, make the pm member point to generic_subsys_pm_ops.
 */
#ifdef CONFIG_PM
extern struct dev_pm_ops generic_subsys_pm_ops;
#define GENERIC_SUBSYS_PM_OPS	(&generic_subsys_pm_ops)
#else
#define GENERIC_SUBSYS_PM_OPS	NULL
#endif

/**
 * PM_EVENT_ messages
 *
@@ -521,6 +508,8 @@ struct dev_pm_info {
	unsigned long		active_jiffies;
	unsigned long		suspended_jiffies;
	unsigned long		accounting_timestamp;
	ktime_t			suspend_time;
	s64			max_time_suspended_ns;
#endif
	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
	struct pm_qos_constraints *constraints;

@@ -10,6 +10,7 @@
#define _LINUX_PM_DOMAIN_H

#include <linux/device.h>
#include <linux/err.h>

enum gpd_status {
	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -21,6 +22,23 @@ enum gpd_status {

struct dev_power_governor {
	bool (*power_down_ok)(struct dev_pm_domain *domain);
	bool (*stop_ok)(struct device *dev);
};

struct gpd_dev_ops {
	int (*start)(struct device *dev);
	int (*stop)(struct device *dev);
	int (*save_state)(struct device *dev);
	int (*restore_state)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*suspend_late)(struct device *dev);
	int (*resume_early)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*freeze_late)(struct device *dev);
	int (*thaw_early)(struct device *dev);
	int (*thaw)(struct device *dev);
	bool (*active_wakeup)(struct device *dev);
};

struct generic_pm_domain {
@@ -32,6 +50,7 @@ struct generic_pm_domain {
	struct mutex lock;
	struct dev_power_governor *gov;
	struct work_struct power_off_work;
	char *name;
	unsigned int in_progress;	/* Number of devices being suspended now */
	atomic_t sd_count;	/* Number of subdomains with power "on" */
	enum gpd_status status;	/* Current state of the domain */
@@ -44,10 +63,13 @@ struct generic_pm_domain {
	bool suspend_power_off;	/* Power status before system suspend */
	bool dev_irq_safe;	/* Device callbacks are IRQ-safe */
	int (*power_off)(struct generic_pm_domain *domain);
	s64 power_off_latency_ns;
	int (*power_on)(struct generic_pm_domain *domain);
	int (*start_device)(struct device *dev);
	int (*stop_device)(struct device *dev);
	bool (*active_wakeup)(struct device *dev);
	s64 power_on_latency_ns;
	struct gpd_dev_ops dev_ops;
	s64 break_even_ns;	/* Power break even for the entire domain. */
	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
	ktime_t power_off_time;
};

static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -62,8 +84,18 @@ struct gpd_link {
	struct list_head slave_node;
};

struct gpd_timing_data {
	s64 stop_latency_ns;
	s64 start_latency_ns;
	s64 save_state_latency_ns;
	s64 restore_state_latency_ns;
	s64 break_even_ns;
};

struct generic_pm_domain_data {
	struct pm_domain_data base;
	struct gpd_dev_ops ops;
	struct gpd_timing_data td;
	bool need_restore;
};

@@ -73,18 +105,54 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *
}

#ifdef CONFIG_PM_GENERIC_DOMAINS
extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
			       struct device *dev);
static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
	return to_gpd_data(dev->power.subsys_data->domain_data);
}

extern struct dev_power_governor simple_qos_governor;

extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
				 struct device *dev,
				 struct gpd_timing_data *td);

static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
				      struct device *dev)
{
	return __pm_genpd_add_device(genpd, dev, NULL);
}

extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
				  struct device *dev);
extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
				  struct generic_pm_domain *new_subdomain);
extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
				     struct generic_pm_domain *target);
extern int pm_genpd_add_callbacks(struct device *dev,
				  struct gpd_dev_ops *ops,
				  struct gpd_timing_data *td);
extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
extern void pm_genpd_init(struct generic_pm_domain *genpd,
			  struct dev_power_governor *gov, bool is_off);

extern int pm_genpd_poweron(struct generic_pm_domain *genpd);

extern bool default_stop_ok(struct device *dev);

extern struct dev_power_governor pm_domain_always_on_gov;
#else

static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	return ERR_PTR(-ENOSYS);
}
static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
					struct device *dev,
					struct gpd_timing_data *td)
{
	return -ENOSYS;
}
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
				      struct device *dev)
{
@@ -105,14 +173,35 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
{
	return -ENOSYS;
}
static inline void pm_genpd_init(struct generic_pm_domain *genpd,
				 struct dev_power_governor *gov, bool is_off) {}
static inline int pm_genpd_add_callbacks(struct device *dev,
					 struct gpd_dev_ops *ops,
					 struct gpd_timing_data *td)
{
	return -ENOSYS;
}
static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
	return -ENOSYS;
}
static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off)
{
}
static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	return -ENOSYS;
}
static inline bool default_stop_ok(struct device *dev)
{
	return false;
}
#define pm_domain_always_on_gov NULL
#endif

static inline int pm_genpd_remove_callbacks(struct device *dev)
{
	return __pm_genpd_remove_callbacks(dev, true);
}

#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
extern void pm_genpd_poweroff_unused(void);

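For orientation (again, not part of the patch), platform code would typically consume the interface declared above roughly as in the sketch below; the domain name, the power-switch callbacks and the device pointer are hypothetical placeholders.

/*
 * Illustrative sketch only: registering a generic PM domain and attaching
 * a device to it with the API shown above.
 */
#include <linux/pm_domain.h>

static int my_pd_power_off(struct generic_pm_domain *genpd)
{
	/* ... gate clocks / cut power to the hardware island ... */
	return 0;
}

static int my_pd_power_on(struct generic_pm_domain *genpd)
{
	/* ... restore power to the hardware island ... */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name		= "my-domain",
	.power_off	= my_pd_power_off,
	.power_on	= my_pd_power_on,
};

static void my_platform_pd_init(struct device *dev)
{
	/* No special governor; the domain starts out powered on. */
	pm_genpd_init(&my_pd, NULL, false);

	/* Equivalent to __pm_genpd_add_device(&my_pd, dev, NULL). */
	pm_genpd_add_device(&my_pd, dev);
}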
@@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);

s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   s32 value);
@@ -91,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value);
#else
static inline int pm_qos_update_target(struct pm_qos_constraints *c,
				       struct plist_node *node,
@@ -119,6 +122,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
			{ return 0; }

static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
@@ -150,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
			struct dev_pm_qos_request *req, s32 value)
			{ return 0; }
#endif

#endif

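The new dev_pm_qos_add_ancestor_request() declared above is what the st1232 touchscreen patch in this series uses to constrain an ancestor of its device; a hypothetical caller might look like the sketch below (the request object, function name and latency value are made up for illustration).

/*
 * Illustrative sketch only: add a PM QoS latency request on behalf of an
 * ancestor of @dev (for example the parent bus controller).
 */
#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_latency_req;

static int my_driver_qos_setup(struct device *dev)
{
	/* 100 is an arbitrary example value. */
	return dev_pm_qos_add_ancestor_request(dev, &my_latency_req, 100);
}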
@@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);

static inline bool pm_children_suspended(struct device *dev)
{
@@ -148,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
static inline unsigned long pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }

static inline void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns) {}

#endif /* !CONFIG_PM_RUNTIME */

static inline int pm_runtime_idle(struct device *dev)

@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
			((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
			 (task->flags & PF_FREEZING) == 0)
			 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value) \
	do { (tsk)->state = (state_value); } while (0)
@@ -1787,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1803,7 +1802,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */

/*
 * Only the _current_ task can read/write to tsk->flags, but other

@@ -95,6 +95,7 @@ struct intc_desc {
	unsigned int num_resources;
	intc_enum force_enable;
	intc_enum force_disable;
	bool skip_syscore_suspend;
	struct intc_hw_desc hw;
};

@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <linux/freezer.h>
#include <asm/errno.h>

#ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

extern struct mutex pm_mutex;

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count);
extern bool pm_save_wakeup_count(unsigned int count);

static inline void lock_system_sleep(void)
{
	freezer_do_not_count();
	mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
	mutex_unlock(&pm_mutex);
	freezer_count();
}

#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline bool pm_wakeup_pending(void) { return false; }
#endif /* !CONFIG_PM_SLEEP */

extern struct mutex pm_mutex;

#ifndef CONFIG_HIBERNATE_CALLBACKS
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}

#else

/* Let some subsystems like memory hotadd exclude hibernation */

static inline void lock_system_sleep(void)
{
	mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
	mutex_unlock(&pm_mutex);
}
#endif
#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
/*

@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
			    struct freezer, css);
}

static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
bool cgroup_freezing(struct task_struct *task)
{
	enum freezer_state state = task_freezer(task)->state;
	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
}
	enum freezer_state state;
	bool ret;

int cgroup_freezing_or_frozen(struct task_struct *task)
{
	int result;
	task_lock(task);
	result = __cgroup_freezing_or_frozen(task);
	task_unlock(task);
	return result;
	rcu_read_lock();
	state = task_freezer(task)->state;
	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
	rcu_read_unlock();

	return ret;
}

/*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
 * cgroup_freezing_or_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * freezer->lock
 *  sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
 *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
 *     sighand->siglock
 */
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	kfree(cgroup_freezer(cgroup));
	struct freezer *freezer = cgroup_freezer(cgroup);

	if (freezer->state != CGROUP_THAWED)
		atomic_dec(&system_freezing_cnt);
	kfree(freezer);
}

/* task is frozen or will freeze immediately when next it gets woken */
@@ -184,13 +183,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,

static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	rcu_read_lock();
	if (__cgroup_freezing_or_frozen(tsk)) {
		rcu_read_unlock();
		return -EBUSY;
	}
	rcu_read_unlock();
	return 0;
	return cgroup_freezing(tsk) ? -EBUSY : 0;
}

static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
		freeze_task(task);
	spin_unlock_irq(&freezer->lock);
}

@@ -238,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		ntotal++;
		if (is_task_frozen_enough(task))
		if (freezing(task) && is_task_frozen_enough(task))
			nfrozen++;
	}

@@ -286,10 +279,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;

	freezer->state = CGROUP_FREEZING;
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		if (!freeze_task(task, true))
		if (!freeze_task(task))
			continue;
		if (is_task_frozen_enough(task))
			continue;
@@ -307,12 +299,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
	struct task_struct *task;

	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		thaw_process(task);
	}
	while ((task = cgroup_iter_next(cgroup, &it)))
		__thaw_task(task);
	cgroup_iter_end(cgroup, &it);

	freezer->state = CGROUP_THAWED;
}

static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@ static int freezer_change_state(struct cgroup *cgroup,
	spin_lock_irq(&freezer->lock);

	update_if_frozen(cgroup, freezer);
	if (goal_state == freezer->state)
		goto out;

	switch (goal_state) {
	case CGROUP_THAWED:
		if (freezer->state != CGROUP_THAWED)
			atomic_dec(&system_freezing_cnt);
		freezer->state = CGROUP_THAWED;
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
		if (freezer->state == CGROUP_THAWED)
			atomic_inc(&system_freezing_cnt);
		freezer->state = CGROUP_FREEZING;
		retval = try_to_freeze_cgroup(cgroup, freezer);
		break;
	default:
		BUG();
	}
out:

	spin_unlock_irq(&freezer->lock);

	return retval;

@@ -470,7 +470,7 @@ void __ref enable_nonboot_cpus(void)
	cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
@@ -543,7 +543,7 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
}


int cpu_hotplug_pm_sync_init(void)
static int __init cpu_hotplug_pm_sync_init(void)
{
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;

@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
	exit_rcu();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */

@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
	new_flags |= PF_FORKNOEXEC;
	new_flags |= PF_STARTING;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)

209 kernel/freezer.c
@@ -9,101 +9,114 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/*
 * freezing is complete, mark current process as frozen
/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state.  Can be
 * called under any context.  The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
static inline void frozen_process(void)
bool freezing_slow_path(struct task_struct *p)
{
	if (!unlikely(current->flags & PF_NOFREEZE)) {
		current->flags |= PF_FROZEN;
		smp_wmb();
	}
	clear_freeze_flag(current);
	if (p->flags & PF_NOFREEZE)
		return false;

	if (pm_nosig_freezing || cgroup_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
		return true;

	return false;
}
EXPORT_SYMBOL(freezing_slow_path);

/* Refrigerator is place where frozen processes are stored :-). */
void refrigerator(void)
bool __refrigerator(bool check_kthr_stop)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	long save;
	bool was_frozen = false;
	long save = current->state;

	task_lock(current);
	if (freezing(current)) {
		frozen_process();
		task_unlock(current);
	} else {
		task_unlock(current);
		return;
	}
	save = current->state;
	pr_debug("%s entered refrigerator\n", current->comm);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* We sent fake signal, clean it up */
	spin_unlock_irq(&current->sighand->siglock);

	/* prevent accounting of that task to load */
	current->flags |= PF_FREEZING;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!frozen(current))

		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	/* Remove the accounting blocker */
	current->flags &= ~PF_FREEZING;

	pr_debug("%s left refrigerator\n", current->comm);
	__set_current_state(save);

	/*
	 * Restore saved task state before returning.  The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(refrigerator);
EXPORT_SYMBOL(__refrigerator);

static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	signal_wake_up(p, 0);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}

/**
 *	freeze_task - send a freeze request to given task
 *	@p: task to send the request to
 *	@sig_only: if set, the request will only be sent if the task has the
 *		PF_FREEZER_NOSIG flag unset
 *	Return value: 'false', if @sig_only is set and the task has
 *		PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 *	The freeze request is sent by setting the tasks's TIF_FREEZE flag and
 *	either sending a fake signal to it or waking it up, depending on whether
 *	or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
 *	has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
 *	TIF_FREEZE flag will not be set.
 * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
 * flag and either sending a fake signal to it or waking it up, depending
 * on whether it has %PF_FREEZER_NOSIG set.
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p, bool sig_only)
bool freeze_task(struct task_struct *p)
{
	/*
	 * We first check if the task is freezing and next if it has already
	 * been frozen to avoid the race with frozen_process() which first marks
	 * the task as frozen and next clears its TIF_FREEZE.
	 */
	if (!freezing(p)) {
		smp_rmb();
		if (frozen(p))
			return false;
	unsigned long flags;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (should_send_signal(p)) {
	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}

void cancel_freezing(struct task_struct *p)
void __thaw_task(struct task_struct *p)
{
	unsigned long flags;

	if (freezing(p)) {
		pr_debug("  clean up: %s\n", p->comm);
		clear_freeze_flag(p);
		spin_lock_irqsave(&p->sighand->siglock, flags);
		recalc_sigpending_and_wake(p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
}

static int __thaw_process(struct task_struct *p)
{
	if (frozen(p)) {
		p->flags &= ~PF_FROZEN;
		return 1;
	}
	clear_freeze_flag(p);
	return 0;
}

/*
 * Wake up a frozen process
 *
 * task_lock() is needed to prevent the race with refrigerator() which may
 * occur if the freezing of tasks fails.  Namely, without the lock, if the
 * freezing of tasks failed, thaw_tasks() might have run before a task in
 * refrigerator() could call frozen_process(), in which case the task would be
 * frozen and no one would thaw it.
 */
int thaw_process(struct task_struct *p)
{
	task_lock(p);
	if (__thaw_process(p) == 1) {
		task_unlock(p);
	/*
	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
	 * be visible to @p as waking up implies wmb.  Waking up inside
	 * freezer_lock also prevents wakeups from leaking outside
	 * refrigerator.
	 */
	spin_lock_irqsave(&freezer_lock, flags);
	if (frozen(p))
		wake_up_process(p);
		return 1;
	}
	task_unlock(p);
	return 0;
	spin_unlock_irqrestore(&freezer_lock, flags);
}
EXPORT_SYMBOL(thaw_process);

/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock.  This ensures the
	 * freezer notices that we aren't frozen yet or the freezing
	 * condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);

@@ -1523,7 +1523,7 @@ int kernel_kexec(void)

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		mutex_lock(&pm_mutex);
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
		thaw_processes();
 Restore_console:
		pm_restore_console();
		mutex_unlock(&pm_mutex);
		unlock_system_sleep();
	}
#endif

@@ -36,6 +36,7 @@
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>
@@ -50,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

@@ -275,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static int usermodehelper_disabled = 1;

@@ -282,17 +285,29 @@ static int usermodehelper_disabled = 1;
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_pm_callback() to wait for all running
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_pm_callback() fails
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

void read_lock_usermodehelper(void)
{
	down_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_lock_usermodehelper);

void read_unlock_usermodehelper(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);

/**
 * usermodehelper_disable - prevent new helpers from being started
 */
@@ -300,8 +315,10 @@ int usermodehelper_disable(void)
{
	long retval;

	down_write(&umhelper_sem);
	usermodehelper_disabled = 1;
	smp_mb();
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
@@ -314,7 +331,9 @@ int usermodehelper_disable(void)
	if (retval)
		return 0;

	down_write(&umhelper_sem);
	usermodehelper_disabled = 0;
	up_write(&umhelper_sem);
	return -EAGAIN;
}

@@ -323,7 +342,9 @@ int usermodehelper_disable(void)
 */
void usermodehelper_enable(void)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = 0;
	up_write(&umhelper_sem);
}

/**

@@ -58,6 +58,31 @@ int kthread_should_stop(void)
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

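To illustrate the intended use of kthread_freezable_should_stop() added above (a sketch, not code from this series; the names and the sleep interval are arbitrary):

/*
 * Illustrative sketch only: a freezable kthread main loop built on
 * kthread_freezable_should_stop().
 */
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_freezable_thread(void *data)
{
	bool was_frozen;

	set_freezable();

	/* Enters the refrigerator when needed; returns true on kthread_stop(). */
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("my_freezable_thread: back from the refrigerator\n");

		/* ... do one unit of work, then sleep for a while ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}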
@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
enum {
	HIBERNATION_INVALID,
	HIBERNATION_PLATFORM,
	HIBERNATION_TEST,
	HIBERNATION_TESTPROC,
	HIBERNATION_SHUTDOWN,
	HIBERNATION_REBOOT,
	/* keep last */
@@ -55,7 +53,7 @@ enum {

static int hibernation_mode = HIBERNATION_SHUTDOWN;

static bool freezer_test_done;
bool freezer_test_done;

static const struct platform_hibernation_ops *hibernation_ops;

@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
		WARN_ON(1);
		return;
	}
	mutex_lock(&pm_mutex);
	lock_system_sleep();
	hibernation_ops = ops;
	if (ops)
		hibernation_mode = HIBERNATION_PLATFORM;
	else if (hibernation_mode == HIBERNATION_PLATFORM)
		hibernation_mode = HIBERNATION_SHUTDOWN;

	mutex_unlock(&pm_mutex);
	unlock_system_sleep();
}

static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
	mdelay(5000);
}

static int hibernation_testmode(int mode)
{
	if (hibernation_mode == mode) {
		hibernation_debug_sleep();
		return 1;
	}
	return 0;
}

static int hibernation_test(int level)
{
	if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
	return 0;
}
#else /* !CONFIG_PM_DEBUG */
static int hibernation_testmode(int mode) { return 0; }
static int hibernation_test(int level) { return 0; }
#endif /* !CONFIG_PM_DEBUG */

@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
		goto Platform_finish;

	error = disable_nonboot_cpus();
	if (error || hibernation_test(TEST_CPUS)
	    || hibernation_testmode(HIBERNATION_TEST))
	if (error || hibernation_test(TEST_CPUS))
		goto Enable_cpus;

	local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
 */
int hibernation_snapshot(int platform_mode)
{
	pm_message_t msg = PMSG_RECOVER;
	pm_message_t msg;
	int error;

	error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
	if (error)
		goto Cleanup;

	if (hibernation_test(TEST_FREEZER) ||
		hibernation_testmode(HIBERNATION_TESTPROC)) {
	if (hibernation_test(TEST_FREEZER)) {

		/*
		 * Indicate to the caller that we are returning due to a
@@ -362,26 +348,26 @@ int hibernation_snapshot(int platform_mode)

	error = dpm_prepare(PMSG_FREEZE);
	if (error) {
		dpm_complete(msg);
		dpm_complete(PMSG_RECOVER);
		goto Cleanup;
	}

	suspend_console();
	pm_restrict_gfp_mask();

	error = dpm_suspend(PMSG_FREEZE);
	if (error)
		goto Recover_platform;

	if (hibernation_test(TEST_DEVICES))
		goto Recover_platform;
	if (error || hibernation_test(TEST_DEVICES))
		platform_recover(platform_mode);
	else
		error = create_image(platform_mode);

	error = create_image(platform_mode);
	/*
	 * Control returns here (1) after the image has been created or the
	 * In the case that we call create_image() above, the control
	 * returns here (1) after the image has been created or the
	 * image creation has failed and (2) after a successful restore.
	 */

 Resume_devices:
	/* We may need to release the preallocated image pages here. */
	if (error || !in_suspend)
		swsusp_free();
@@ -399,10 +385,6 @@ int hibernation_snapshot(int platform_mode)
	platform_end(platform_mode);
	return error;

 Recover_platform:
	platform_recover(platform_mode);
	goto Resume_devices;

 Cleanup:
	swsusp_free();
	goto Close;
@@ -590,9 +572,6 @@ int hibernation_platform_enter(void)
static void power_down(void)
{
	switch (hibernation_mode) {
	case HIBERNATION_TEST:
	case HIBERNATION_TESTPROC:
		break;
	case HIBERNATION_REBOOT:
		kernel_restart(NULL);
		break;
@@ -611,17 +590,6 @@ static void power_down(void)
	while(1);
}

static int prepare_processes(void)
{
	int error = 0;

	if (freeze_processes()) {
		error = -EBUSY;
		thaw_processes();
	}
	return error;
}

/**
 * hibernate - Carry out system hibernation, including saving the image.
 */
@@ -629,7 +597,7 @@ int hibernate(void)
{
	int error;

	mutex_lock(&pm_mutex);
	lock_system_sleep();
	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
@@ -654,7 +622,7 @@ int hibernate(void)
	sys_sync();
	printk("done.\n");

	error = prepare_processes();
	error = freeze_processes();
	if (error)
		goto Finish;

@@ -697,7 +665,7 @@ int hibernate(void)
	pm_restore_console();
	atomic_inc(&snapshot_device_available);
 Unlock:
	mutex_unlock(&pm_mutex);
	unlock_system_sleep();
	return error;
}

@@ -811,11 +779,13 @@ static int software_resume(void)
		goto close_finish;

	error = create_basic_memory_bitmaps();
	if (error)
	if (error) {
		usermodehelper_enable();
		goto close_finish;
	}

	pr_debug("PM: Preparing processes for restore.\n");
	error = prepare_processes();
	error = freeze_processes();
	if (error) {
		swsusp_close(FMODE_READ);
		goto Done;
@@ -855,8 +825,6 @@ static const char * const hibernation_modes[] = {
	[HIBERNATION_PLATFORM]	= "platform",
	[HIBERNATION_SHUTDOWN]	= "shutdown",
	[HIBERNATION_REBOOT]	= "reboot",
	[HIBERNATION_TEST]	= "test",
	[HIBERNATION_TESTPROC]	= "testproc",
};

/*
@@ -865,17 +833,15 @@ static const char * const hibernation_modes[] = {
 * Hibernation can be handled in several ways.  There are a few different ways
 * to put the system into the sleep state: using the platform driver (e.g. ACPI
 * or other hibernation_ops), powering it off or rebooting it (for testing
 * mostly), or using one of the two available test modes.
 * mostly).
 *
 * The sysfs file /sys/power/disk provides an interface for selecting the
 * hibernation mode to use.  Reading from this file causes the available modes
 * to be printed.  There are 5 modes that can be supported:
 * to be printed.  There are 3 modes that can be supported:
 *
 *	'platform'
 *	'shutdown'
 *	'reboot'
 *	'test'
 *	'testproc'
 *
 * If a platform hibernation driver is in use, 'platform' will be supported
 * and will be used by default.  Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
		switch (i) {
		case HIBERNATION_SHUTDOWN:
		case HIBERNATION_REBOOT:
		case HIBERNATION_TEST:
		case HIBERNATION_TESTPROC:
			break;
		case HIBERNATION_PLATFORM:
			if (hibernation_ops)
@@ -929,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	mutex_lock(&pm_mutex);
	lock_system_sleep();
	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
		if (len == strlen(hibernation_modes[i])
		    && !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
	switch (mode) {
	case HIBERNATION_SHUTDOWN:
	case HIBERNATION_REBOOT:
	case HIBERNATION_TEST:
	case HIBERNATION_TESTPROC:
		hibernation_mode = mode;
		break;
	case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
	if (!error)
		pr_debug("PM: Hibernation mode set to '%s'\n",
			 hibernation_modes[mode]);
	mutex_unlock(&pm_mutex);
	unlock_system_sleep();
	return error ? error : n;
}

@@ -984,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
	if (maj != MAJOR(res) || min != MINOR(res))
		goto out;

	mutex_lock(&pm_mutex);
	lock_system_sleep();
	swsusp_resume_device = res;
	mutex_unlock(&pm_mutex);
	unlock_system_sleep();
	printk(KERN_INFO "PM: Starting manual resume from disk\n");
	noresume = 0;
	software_resume();

@@ -3,7 +3,7 @@
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 *
 * This file is released under the GPLv2
 *
 */
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	mutex_lock(&pm_mutex);
	lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
			break;
		}

	mutex_unlock(&pm_mutex);
	unlock_system_sleep();

	return error ? error : n;
}
@@ -240,7 +240,7 @@ struct kobject *power_kobj;
 *	'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
 *	'disk' (Suspend-to-Disk).
 *
 *	store() accepts one of those strings, translates it into the
 *	store() accepts one of those strings, translates it into the
 *	proper enumerated value, and initiates a suspend transition.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
	/* First, check if we are requested to hibernate */
	if (len == 4 && !strncmp(buf, "disk", len)) {
		error = hibernate();
  goto Exit;
		goto Exit;
	}

#ifdef CONFIG_SUSPEND

Some files were not shown because too many files have changed in this diff.