Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6:
  ACPI / PM: Do not enable GPEs for system wakeup in advance
  ACPICA: Truncate I/O addresses to 16 bits for Windows compatibility
  ACPICA: Limit maximum time for Sleep() operator
  ACPICA: Fix namestring associated with AE_NO_HANDLER exception
  ACPI / ACPICA: Fix sysfs GPE interface
  ACPI / ACPICA: Fix GPE initialization
  ACPI / ACPICA: Avoid writing full enable masks to GPE registers
  ACPI / ACPICA: Fix low-level GPE manipulation code
  ACPI / ACPICA: Use helper function for computing GPE masks
  ACPI / ACPICA: Do not attempt to disable GPE when installing handler
  ACPI: Disable Vista compatibility for Sony VGN-NS50B_L
  ACPI: fan: fix unbalanced code block
  ACPI: Store NVS state even when entering suspend to RAM
  suspend: Move NVS save/restore code to generic suspend functionality
  ACPI: Do not try to set up acpi processor stuff on cores exceeding maxcpus=
  ACPI: acpi_pad: Don't needlessly mark LAPIC unstable
commit 02c646ef4e
30 changed files with 299 additions and 238 deletions
@@ -729,7 +729,7 @@ static int __init e820_mark_nvs_memory(void)
struct e820entry *ei = &e820.map[i];
if (ei->type == E820_NVS)
hibernate_nvs_register(ei->addr, ei->size);
suspend_nvs_register(ei->addr, ei->size);
}
return 0;
@@ -46,6 +46,8 @@ static unsigned long power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
static unsigned char lapic_detected_unstable;
static unsigned char lapic_marked_unstable;
static void power_saving_mwait_init(void)
{

@@ -75,9 +77,6 @@ static void power_saving_mwait_init(void)
power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
(highest_subcstate - 1);
for_each_online_cpu(i)
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:

@@ -86,13 +85,15 @@ static void power_saving_mwait_init(void)
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
*/
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return;
/*FALL THROUGH*/
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
tsc_detected_unstable = 1;
if (!boot_cpu_has(X86_FEATURE_ARAT))
lapic_detected_unstable = 1;
break;
default:
/* TSC could halt in idle */
/* TSC & LAPIC could halt in idle */
tsc_detected_unstable = 1;
lapic_detected_unstable = 1;
}
#endif
}

@@ -180,10 +181,20 @@ static int power_saving_thread(void *data)
mark_tsc_unstable("TSC halts in idle");
tsc_marked_unstable = 1;
}
if (lapic_detected_unstable && !lapic_marked_unstable) {
int i;
/* LAPIC could halt in idle, so notify users */
for_each_online_cpu(i)
clockevents_notify(
CLOCK_EVT_NOTIFY_BROADCAST_ON,
&i);
lapic_marked_unstable = 1;
}
local_irq_disable();
cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
&cpu);
if (lapic_marked_unstable)
clockevents_notify(
CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
stop_critical_timings();
__monitor((void *)&current_thread_info()->flags, 0, 0);

@@ -192,8 +203,9 @@ static int power_saving_thread(void *data)
__mwait(power_saving_mwait_eax, 1);
start_critical_timings();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
&cpu);
if (lapic_marked_unstable)
clockevents_notify(
CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
local_irq_enable();
if (jiffies > expire_time) {
@@ -119,6 +119,10 @@
#define ACPI_MAX_LOOP_ITERATIONS 0xFFFF
/* Maximum sleep allowed via Sleep() operator */
#define ACPI_MAX_SLEEP 20000 /* Two seconds */
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
@@ -80,10 +80,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
@@ -125,6 +125,14 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
/*
* Optionally truncate I/O addresses to 16 bits. Provides compatibility
* with other ACPI implementations. NOTE: During ACPICA initialization,
* this value is set to TRUE if any Windows OSI strings have been
* requested by the BIOS.
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
@@ -90,7 +90,11 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width);
/*
* hwgpe - GPE support
*/
acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info);
acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
acpi_status
acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
@@ -69,7 +69,7 @@ acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
u8 register_bit;
u32 register_bit;
ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);

@@ -78,9 +78,8 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
return_ACPI_STATUS(AE_NOT_EXIST);
}
register_bit = (u8)
(1 <<
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
gpe_register_info);
/* Clear the wake/run bits up front */

@@ -100,106 +99,6 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
*
* RETURN: Status
*
* DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
* of type or number of references.
*
* Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_enable_gpe);
/*
* We will only allow a GPE to be enabled if it has either an
* associated method (_Lxx/_Exx) or a handler. Otherwise, the
* GPE will be immediately disabled by acpi_ev_gpe_dispatch the
* first time it fires.
*/
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
return_ACPI_STATUS(AE_NO_HANDLER);
}
/* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Clear the GPE (of stale events) */
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Enable the requested GPE */
status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_disable_gpe
*
* PARAMETERS: gpe_event_info - GPE to disable
*
* RETURN: Status
*
* DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
* regardless of the type or number of references.
*
* Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_disable_gpe);
/*
* Note: Always disable the GPE, even if we think that that it is already
* disabled. It is possible that the AML or some other code has enabled
* the GPE behind our back.
*/
/* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* Always H/W disable this GPE, even if we don't know the GPE type.
* Simply clear the enable bit for this particular GPE, but do not
* write out the current GPE enable mask since this may inadvertently
* enable GPEs too early. An example is a rogue GPE that has arrived
* during ACPICA initialization - possibly because AML or other code
* has enabled the GPE.
*/
status = acpi_hw_low_disable_gpe(gpe_event_info);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*

@@ -451,10 +350,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
return_VOID;
}
/* Update the GPE register masks for return to enabled state */
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
/*
* Take a snapshot of the GPE info for this level - we copy the info to
* prevent a race condition with remove_handler/remove_block.

@@ -607,7 +502,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
* Disable the GPE, so it doesn't keep firing before the method has a
* chance to run (it runs asynchronously with interrupts enabled).
*/
status = acpi_ev_disable_gpe(gpe_event_info);
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[0x%2X]",

@@ -644,7 +539,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
* Disable the GPE. The GPE will remain disabled a handler
* is installed or ACPICA is restarted.
*/
status = acpi_ev_disable_gpe(gpe_event_info);
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[0x%2X]",
@@ -500,6 +500,19 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
gpe_number = gpe_index + gpe_block->block_base_number;
/*
* If the GPE has already been enabled for runtime
* signaling, make sure it remains enabled, but do not
* increment its reference counter.
*/
if (gpe_event_info->runtime_count) {
acpi_set_gpe(gpe_device, gpe_number,
ACPI_GPE_ENABLE);
gpe_enabled_count++;
continue;
}
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;

@@ -516,7 +529,6 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
/* Enable this GPE */
gpe_number = gpe_index + gpe_block->block_base_number;
status = acpi_enable_gpe(gpe_device, gpe_number,
ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
@@ -719,13 +719,6 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
/* Disable the GPE before installing the handler */
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
/* Install the handler */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -208,6 +208,44 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
* FUNCTION: acpi_clear_and_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
*
* RETURN: Status
*
* DESCRIPTION: Clear the given GPE from stale events and enable it.
*
******************************************************************************/
static acpi_status
acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
/*
* We will only allow a GPE to be enabled if it has either an
* associated method (_Lxx/_Exx) or a handler. Otherwise, the
* GPE will be immediately disabled by acpi_ev_gpe_dispatch the
* first time it fires.
*/
if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
return_ACPI_STATUS(AE_NO_HANDLER);
}
/* Clear the GPE (of stale events) */
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Enable the requested GPE */
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_set_gpe

@@ -249,11 +287,11 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
switch (action) {
case ACPI_GPE_ENABLE:
status = acpi_ev_enable_gpe(gpe_event_info);
status = acpi_clear_and_enable_gpe(gpe_event_info);
break;
case ACPI_GPE_DISABLE:
status = acpi_ev_disable_gpe(gpe_event_info);
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
break;
default:

@@ -316,7 +354,11 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
gpe_event_info->runtime_count++;
if (gpe_event_info->runtime_count == 1) {
status = acpi_ev_enable_gpe(gpe_event_info);
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_clear_and_enable_gpe(gpe_event_info);
}
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
goto unlock_and_exit;

@@ -343,7 +385,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
*/
gpe_event_info->wakeup_count++;
if (gpe_event_info->wakeup_count == 1) {
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
}
}

@@ -403,7 +445,12 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type
gpe_event_info->runtime_count--;
if (!gpe_event_info->runtime_count) {
status = acpi_ev_disable_gpe(gpe_event_info);
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
if (ACPI_SUCCESS(status)) {
status = acpi_hw_low_set_gpe(gpe_event_info,
ACPI_GPE_DISABLE);
}
if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count++;
goto unlock_and_exit;

@@ -424,7 +471,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type
gpe_event_info->wakeup_count--;
if (!gpe_event_info->wakeup_count) {
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
}
}
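Editor's aside, not part of the commit: the acpi_enable_gpe()/acpi_disable_gpe() hunks above only touch the hardware when the per-GPE runtime_count crosses 0 -> 1 or 1 -> 0. A minimal stand-alone C sketch of that reference-counting pattern follows; all names are illustrative, not the ACPICA API.

#include <stdio.h>

/* Toy model of reference-counted enabling: hardware is touched only on the
 * 0 -> 1 (enable) and 1 -> 0 (disable) transitions, mirroring the
 * runtime_count handling in the hunks above. */
struct toy_gpe {
	unsigned int runtime_count;
	int hw_enabled;
};

static void toy_gpe_enable(struct toy_gpe *gpe)
{
	if (++gpe->runtime_count == 1)
		gpe->hw_enabled = 1;	/* would clear stale events and enable in hardware */
}

static void toy_gpe_disable(struct toy_gpe *gpe)
{
	if (gpe->runtime_count && --gpe->runtime_count == 0)
		gpe->hw_enabled = 0;	/* would clear the enable bit in hardware */
}

int main(void)
{
	struct toy_gpe gpe = { 0, 0 };

	toy_gpe_enable(&gpe);
	toy_gpe_enable(&gpe);	/* second user: no extra hardware write */
	toy_gpe_disable(&gpe);	/* still one user left, hardware stays enabled */
	printf("count=%u hw=%d\n", gpe.runtime_count, gpe.hw_enabled);
	return 0;
}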
@@ -201,6 +201,14 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long)
acpi_ex_relinquish_interpreter();
/*
* For compatibility with other ACPI implementations and to prevent
* accidental deep sleeps, limit the sleep time to something reasonable.
*/
if (how_long > ACPI_MAX_SLEEP) {
how_long = ACPI_MAX_SLEEP;
}
acpi_os_sleep(how_long);
/* And now we must get the interpreter again */
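Editor's aside, not part of the commit: the hunk above caps the argument of the ASL Sleep() operator at ACPI_MAX_SLEEP before calling acpi_os_sleep(). A trivial sketch of the same clamp; the 2000 ms cap used here is purely illustrative, the real limit is whatever acconfig.h defines.

#include <stdint.h>
#include <stdio.h>

#define MAX_SLEEP_MS 2000	/* illustrative cap; the commit defines ACPI_MAX_SLEEP */

static uint64_t clamp_sleep(uint64_t how_long_ms)
{
	/* Cap unreasonably long Sleep() requests, as the hunk above does. */
	if (how_long_ms > MAX_SLEEP_MS)
		how_long_ms = MAX_SLEEP_MS;
	return how_long_ms;
}

int main(void)
{
	/* A rogue Sleep(600000) is reduced to the configured maximum. */
	printf("%llu\n", (unsigned long long)clamp_sleep(600000));
	return 0;
}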
@@ -57,21 +57,47 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/******************************************************************************
*
* FUNCTION: acpi_hw_low_disable_gpe
* FUNCTION: acpi_hw_gpe_register_bit
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
* PARAMETERS: gpe_event_info - Info block for the GPE
* gpe_register_info - Info block for the GPE register
*
* RETURN: Status
*
* DESCRIPTION: Disable a single GPE in the enable register.
* DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given
* GPE set.
*
******************************************************************************/
acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info)
{
return (u32)1 << (gpe_event_info->gpe_number -
gpe_register_info->base_gpe_number);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_low_set_gpe
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
* action - Enable or disable
*
* RETURN: Status
*
* DESCRIPTION: Enable or disable a single GPE in its enable register.
*
******************************************************************************/
acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
u32 enable_mask;
u32 register_bit;
ACPI_FUNCTION_ENTRY();
/* Get the info block for the entire GPE register */

@@ -87,11 +113,27 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
return (status);
}
/* Clear just the bit that corresponds to this GPE */
/* Set ot clear just the bit that corresponds to this GPE */
ACPI_CLEAR_BIT(enable_mask, ((u32)1 <<
(gpe_event_info->gpe_number -
gpe_register_info->base_gpe_number)));
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
gpe_register_info);
switch (action) {
case ACPI_GPE_COND_ENABLE:
if (!(register_bit & gpe_register_info->enable_for_run))
return (AE_BAD_PARAMETER);
case ACPI_GPE_ENABLE:
ACPI_SET_BIT(enable_mask, register_bit);
break;
case ACPI_GPE_DISABLE:
ACPI_CLEAR_BIT(enable_mask, register_bit);
break;
default:
ACPI_ERROR((AE_INFO, "Invalid action\n"));
return (AE_BAD_PARAMETER);
}
/* Write the updated enable mask */

@@ -116,23 +158,11 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
acpi_status
acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
ACPI_FUNCTION_ENTRY();
/* Get the info block for the entire GPE register */
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return (AE_NOT_EXIST);
}
/* Write the entire GPE (runtime) enable register */
status = acpi_hw_write(gpe_register_info->enable_for_run,
&gpe_register_info->enable_address);
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
return (status);
}

@@ -150,21 +180,28 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
u8 register_bit;
u32 register_bit;
ACPI_FUNCTION_ENTRY();
register_bit = (u8)(1 <<
(gpe_event_info->gpe_number -
gpe_event_info->register_info->base_gpe_number));
/* Get the info block for the entire GPE register */
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return (AE_NOT_EXIST);
}
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
gpe_register_info);
/*
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
status = acpi_hw_write(register_bit,
&gpe_event_info->register_info->status_address);
&gpe_register_info->status_address);
return (status);
}

@@ -187,7 +224,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
acpi_event_status * event_status)
{
u32 in_byte;
u8 register_bit;
u32 register_bit;
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
acpi_event_status local_event_status = 0;

@@ -204,9 +241,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
/* Get the register bitmask for this GPE */
register_bit = (u8)(1 <<
(gpe_event_info->gpe_number -
gpe_event_info->register_info->base_gpe_number));
register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
gpe_register_info);
/* GPE currently enabled? (enabled for runtime?) */
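Editor's aside, not part of the commit: the new acpi_hw_gpe_register_bit() helper above reduces the per-GPE enable/status mask to a single shift by (gpe_number - base_gpe_number). A stand-alone C sketch of the same arithmetic, using a hypothetical GPE block base.

#include <stdio.h>

/* Same computation as acpi_hw_gpe_register_bit() in the hunks above:
 * one bit at position (gpe_number - base_gpe_number) within the register. */
static unsigned int gpe_register_bit(unsigned int gpe_number,
				     unsigned int base_gpe_number)
{
	return 1u << (gpe_number - base_gpe_number);
}

int main(void)
{
	/* Hypothetical block with base 0x10: GPE 0x13 maps to bit 3 (0x08). */
	printf("0x%02x\n", gpe_register_bit(0x13, 0x10));
	return 0;
}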
@@ -222,6 +222,12 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
u32 one_byte;
u32 i;
/* Truncate address to 16 bits if requested */
if (acpi_gbl_truncate_io_addresses) {
address &= ACPI_UINT16_MAX;
}
/* Validate the entire request and perform the I/O */
status = acpi_hw_validate_io_request(address, width);

@@ -279,6 +285,12 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
acpi_status status;
u32 i;
/* Truncate address to 16 bits if requested */
if (acpi_gbl_truncate_io_addresses) {
address &= ACPI_UINT16_MAX;
}
/* Validate the entire request and perform the I/O */
status = acpi_hw_validate_io_request(address, width);
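Editor's aside, not part of the commit: when acpi_gbl_truncate_io_addresses is set, the hunks above mask the port address with ACPI_UINT16_MAX (0xFFFF), matching Windows behaviour. A small sketch of the effect of that mask.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the hunks above: when truncation is requested, only the low
 * 16 bits of the I/O port address are kept (0xFFFF, i.e. ACPI_UINT16_MAX). */
static uint64_t maybe_truncate(uint64_t address, int truncate_io_addresses)
{
	if (truncate_io_addresses)
		address &= 0xFFFF;
	return address;
}

int main(void)
{
	/* 0x12345678 becomes 0x5678 once truncation is enabled. */
	printf("0x%llx\n", (unsigned long long)maybe_truncate(0x12345678, 1));
	return 0;
}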
@@ -193,6 +193,15 @@ acpi_status acpi_ns_initialize_devices(void)
acpi_ns_init_one_device, NULL, &info,
NULL);
/*
* Any _OSI requests should be completed by now. If the BIOS has
* requested any Windows OSI strings, we will always truncate
* I/O addresses to 16 bits -- for Windows compatibility.
*/
if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
acpi_gbl_truncate_io_addresses = TRUE;
}
ACPI_FREE(info.evaluate_info);
if (ACPI_FAILURE(status)) {
goto error_exit;
@@ -218,6 +218,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
.callback = dmi_disable_osi_vista,
.ident = "VGN-NS50B_L",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
},
},
{
.callback = dmi_disable_osi_win7,
.ident = "ASUS K50IJ",
.matches = {
@@ -425,7 +425,7 @@ static int acpi_button_add(struct acpi_device *device)
/* Button's GPE is run-wake GPE */
acpi_enable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
ACPI_GPE_TYPE_RUNTIME);
device->wakeup.run_wake_count++;
device->wakeup.state.enabled = 1;
}

@@ -449,7 +449,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
if (device->wakeup.flags.valid) {
acpi_disable_gpe(device->wakeup.gpe_device,
device->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE_RUN);
ACPI_GPE_TYPE_RUNTIME);
device->wakeup.run_wake_count--;
device->wakeup.state.enabled = 0;
}
@@ -347,7 +347,6 @@ static int __init acpi_fan_init(void)
{
int result = 0;
#ifdef CONFIG_ACPI_PROCFS
acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
if (!acpi_fan_dir)

@@ -356,7 +355,9 @@ static int __init acpi_fan_init(void)
result = acpi_bus_register_driver(&acpi_fan_driver);
if (result < 0) {
#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
#endif
return -ENODEV;
}
@@ -581,6 +581,11 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
return 0;
}
#ifdef CONFIG_SMP
if (pr->id >= setup_max_cpus && pr->id != 0)
return 0;
#endif
BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
/*
@@ -114,6 +114,8 @@ static int __acpi_pm_prepare(void)
{
int error = acpi_sleep_prepare(acpi_target_sleep_state);
suspend_nvs_save();
if (error)
acpi_target_sleep_state = ACPI_STATE_S0;
return error;

@@ -143,6 +145,9 @@ static void acpi_pm_finish(void)
{
u32 acpi_state = acpi_target_sleep_state;
suspend_nvs_free();
acpi_ec_unblock_transactions();
if (acpi_state == ACPI_STATE_S0)
return;

@@ -192,6 +197,11 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
u32 acpi_state = acpi_suspend_states[pm_state];
int error = 0;
error = suspend_nvs_alloc();
if (error)
return error;
if (sleep_states[acpi_state]) {
acpi_target_sleep_state = acpi_state;
acpi_sleep_tts_switch(acpi_target_sleep_state);

@@ -269,12 +279,13 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
if (acpi_state == ACPI_STATE_S3)
acpi_restore_state_mem();
suspend_nvs_restore();
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static void acpi_suspend_finish(void)
{
acpi_ec_unblock_transactions();
acpi_pm_finish();
}

@@ -404,7 +415,7 @@ static int acpi_hibernation_begin(void)
{
int error;
error = s4_no_nvs ? 0 : hibernate_nvs_alloc();
error = s4_no_nvs ? 0 : suspend_nvs_alloc();
if (!error) {
acpi_target_sleep_state = ACPI_STATE_S4;
acpi_sleep_tts_switch(acpi_target_sleep_state);

@@ -418,7 +429,7 @@ static int acpi_hibernation_pre_snapshot(void)
int error = acpi_pm_prepare();
if (!error)
hibernate_nvs_save();
suspend_nvs_save();
return error;
}

@@ -441,13 +452,6 @@ static int acpi_hibernation_enter(void)
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static void acpi_hibernation_finish(void)
{
hibernate_nvs_free();
acpi_ec_unblock_transactions();
acpi_pm_finish();
}
static void acpi_hibernation_leave(void)
{
/*

@@ -464,7 +468,7 @@ static void acpi_hibernation_leave(void)
panic("ACPI S4 hardware signature mismatch");
}
/* Restore the NVS memory area */
hibernate_nvs_restore();
suspend_nvs_restore();
/* Allow EC transactions to happen. */
acpi_ec_unblock_transactions_early();
}

@@ -479,7 +483,7 @@ static struct platform_hibernation_ops acpi_hibernation_ops = {
.begin = acpi_hibernation_begin,
.end = acpi_pm_end,
.pre_snapshot = acpi_hibernation_pre_snapshot,
.finish = acpi_hibernation_finish,
.finish = acpi_pm_finish,
.prepare = acpi_pm_prepare,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,

@@ -507,7 +511,7 @@ static int acpi_hibernation_begin_old(void)
if (!error) {
if (!s4_no_nvs)
error = hibernate_nvs_alloc();
error = suspend_nvs_alloc();
if (!error)
acpi_target_sleep_state = ACPI_STATE_S4;
}

@@ -517,7 +521,7 @@ static int acpi_hibernation_begin_old(void)
static int acpi_hibernation_pre_snapshot_old(void)
{
acpi_pm_freeze();
hibernate_nvs_save();
suspend_nvs_save();
return 0;
}

@@ -529,8 +533,8 @@ static struct platform_hibernation_ops acpi_hibernation_ops_old = {
.begin = acpi_hibernation_begin_old,
.end = acpi_pm_end,
.pre_snapshot = acpi_hibernation_pre_snapshot_old,
.finish = acpi_hibernation_finish,
.prepare = acpi_pm_freeze,
.finish = acpi_pm_finish,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
.pre_restore = acpi_pm_freeze,
@@ -388,10 +388,12 @@ static ssize_t counter_set(struct kobject *kobj,
if (index < num_gpes) {
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE);
result = acpi_disable_gpe(handle, index,
ACPI_GPE_TYPE_RUNTIME);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
result = acpi_enable_gpe(handle, index,
ACPI_GPE_TYPE_RUNTIME);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);
@@ -64,16 +64,13 @@ void acpi_enable_wakeup_device(u8 sleep_state)
struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid)
continue;
if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
|| sleep_state > (u32) dev->wakeup.sleep_state)
continue;
/* The wake-up power should have been enabled already. */
acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_ENABLE);
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
}
}

@@ -96,6 +93,8 @@ void acpi_disable_wakeup_device(u8 sleep_state)
|| (sleep_state > (u32) dev->wakeup.sleep_state))
continue;
acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
acpi_disable_wakeup_device_power(dev);
}
}

@@ -109,13 +108,8 @@ int __init acpi_wakeup_device_init(void)
struct acpi_device *dev = container_of(node,
struct acpi_device,
wakeup_list);
/* In case user doesn't load button driver */
if (!dev->wakeup.flags.always_enabled ||
dev->wakeup.state.enabled)
continue;
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
ACPI_GPE_TYPE_WAKE);
dev->wakeup.state.enabled = 1;
if (dev->wakeup.flags.always_enabled)
dev->wakeup.state.enabled = 1;
}
mutex_unlock(&acpi_device_lock);
return 0;
@@ -212,7 +212,7 @@ char const *acpi_gbl_exception_names_env[] = {
"AE_NO_GLOBAL_LOCK",
"AE_ABORT_METHOD",
"AE_SAME_HANDLER",
"AE_WAKE_ONLY_GPE",
"AE_NO_HANDLER",
"AE_OWNER_ID_LIMIT"
};
@@ -69,6 +69,7 @@ extern acpi_name acpi_gbl_trace_method_name;
extern u32 acpi_gbl_trace_flags;
extern u8 acpi_gbl_enable_aml_debug_object;
extern u8 acpi_gbl_copy_dsdt_locally;
extern u8 acpi_gbl_truncate_io_addresses;
extern u32 acpi_current_gpe_count;
extern struct acpi_table_fadt acpi_gbl_FADT;
@@ -663,10 +663,11 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_MAX 0xFF
#define ACPI_NUM_GPE 256
/* Actions for acpi_set_gpe */
/* Actions for acpi_set_gpe and acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
#define ACPI_GPE_COND_ENABLE 2
/* gpe_types for acpi_enable_gpe and acpi_disable_gpe */
@@ -256,22 +256,22 @@ static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
#endif /* CONFIG_HIBERNATION */
#ifdef CONFIG_HIBERNATION_NVS
extern int hibernate_nvs_register(unsigned long start, unsigned long size);
extern int hibernate_nvs_alloc(void);
extern void hibernate_nvs_free(void);
extern void hibernate_nvs_save(void);
extern void hibernate_nvs_restore(void);
#else /* CONFIG_HIBERNATION_NVS */
static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
#ifdef CONFIG_SUSPEND_NVS
extern int suspend_nvs_register(unsigned long start, unsigned long size);
extern int suspend_nvs_alloc(void);
extern void suspend_nvs_free(void);
extern void suspend_nvs_save(void);
extern void suspend_nvs_restore(void);
#else /* CONFIG_SUSPEND_NVS */
static inline int suspend_nvs_register(unsigned long a, unsigned long b)
{
return 0;
}
static inline int hibernate_nvs_alloc(void) { return 0; }
static inline void hibernate_nvs_free(void) {}
static inline void hibernate_nvs_save(void) {}
static inline void hibernate_nvs_restore(void) {}
#endif /* CONFIG_HIBERNATION_NVS */
static inline int suspend_nvs_alloc(void) { return 0; }
static inline void suspend_nvs_free(void) {}
static inline void suspend_nvs_save(void) {}
static inline void suspend_nvs_restore(void) {}
#endif /* CONFIG_SUSPEND_NVS */
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
@@ -125,7 +125,9 @@ static char *ramdisk_execute_command;
#ifdef CONFIG_SMP
/* Setup configured maximum number of CPUs to activate */
unsigned int __initdata setup_max_cpus = NR_CPUS;
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
* Setup routine for controlling SMP activation
@@ -99,9 +99,13 @@ config PM_SLEEP_ADVANCED_DEBUG
depends on PM_ADVANCED_DEBUG
default n
config SUSPEND_NVS
bool
config SUSPEND
bool "Suspend to RAM and standby"
depends on PM && ARCH_SUSPEND_POSSIBLE
select SUSPEND_NVS if HAS_IOMEM
default y
---help---
Allow the system to enter sleep states in which main memory is

@@ -130,13 +134,10 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
config HIBERNATION_NVS
bool
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
select HIBERNATION_NVS if HAS_IOMEM
select SUSPEND_NVS if HAS_IOMEM
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
@@ -10,6 +10,6 @@ obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
block_io.o
obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
obj-$(CONFIG_SUSPEND_NVS) += nvs.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
@@ -15,7 +15,7 @@
/*
* Platforms, like ACPI, may want us to save some memory used by them during
* hibernation and to restore the contents of this memory during the subsequent
* suspend and to restore the contents of this memory during the subsequent
* resume. The code below implements a mechanism allowing us to do that.
*/

@@ -30,7 +30,7 @@ struct nvs_page {
static LIST_HEAD(nvs_list);
/**
* hibernate_nvs_register - register platform NVS memory region to save
* suspend_nvs_register - register platform NVS memory region to save
* @start - physical address of the region
* @size - size of the region
*

@@ -38,7 +38,7 @@ static LIST_HEAD(nvs_list);
* things so that the data from page-aligned addresses in this region will
* be copied into separate RAM pages.
*/
int hibernate_nvs_register(unsigned long start, unsigned long size)
int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;

@@ -68,9 +68,9 @@ int hibernate_nvs_register(unsigned long start, unsigned long size)
}
/**
* hibernate_nvs_free - free data pages allocated for saving NVS regions
* suspend_nvs_free - free data pages allocated for saving NVS regions
*/
void hibernate_nvs_free(void)
void suspend_nvs_free(void)
{
struct nvs_page *entry;

@@ -86,16 +86,16 @@ void hibernate_nvs_free(void)
}
/**
* hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
* suspend_nvs_alloc - allocate memory necessary for saving NVS regions
*/
int hibernate_nvs_alloc(void)
int suspend_nvs_alloc(void)
{
struct nvs_page *entry;
list_for_each_entry(entry, &nvs_list, node) {
entry->data = (void *)__get_free_page(GFP_KERNEL);
if (!entry->data) {
hibernate_nvs_free();
suspend_nvs_free();
return -ENOMEM;
}
}

@@ -103,9 +103,9 @@ int hibernate_nvs_alloc(void)
}
/**
* hibernate_nvs_save - save NVS memory regions
* suspend_nvs_save - save NVS memory regions
*/
void hibernate_nvs_save(void)
void suspend_nvs_save(void)
{
struct nvs_page *entry;

@@ -119,12 +119,12 @@ void hibernate_nvs_save(void)
}
/**
* hibernate_nvs_restore - restore NVS memory regions
* suspend_nvs_restore - restore NVS memory regions
*
* This function is going to be called with interrupts disabled, so it
* cannot iounmap the virtual addresses used to access the NVS region.
*/
void hibernate_nvs_restore(void)
void suspend_nvs_restore(void)
{
struct nvs_page *entry;
@@ -16,6 +16,12 @@
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include "power.h"