ACPICA 20050617-0624 from Bob Moore <robert.moore@intel.com>

ACPICA 20050617:

Moved the object cache operations into the OS interface
layer (OSL) to allow the host OS to handle these operations
if desired (for example, the Linux OSL will invoke the
slab allocator).  This support is optional; the compile-time
define ACPI_USE_LOCAL_CACHE can be used to retain the
original cache code in the ACPI CA core.  The new OSL
interfaces are shown below.  See utalloc.c for an example
implementation, and acpiosxf.h for the exact interface
definitions.  Thanks to Alexey Starikovskiy.
	acpi_os_create_cache
	acpi_os_delete_cache
	acpi_os_purge_cache
	acpi_os_acquire_object
	acpi_os_release_object
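
As a rough sketch, the prototypes end up as shown below after
the 20050624 follow-up described later (parameter names are
taken from the Linux OSL implementation in the osl.c hunk
further down; see acpiosxf.h for the exact definitions):

	acpi_status acpi_os_create_cache (char *name, u16 size,
		u16 depth, acpi_cache_t **cache);
	acpi_status acpi_os_delete_cache (acpi_cache_t *cache);
	acpi_status acpi_os_purge_cache (acpi_cache_t *cache);
	void *acpi_os_acquire_object (acpi_cache_t *cache);
	acpi_status acpi_os_release_object (acpi_cache_t *cache,
		void *object);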

Modified the interfaces to acpi_os_acquire_lock and
acpi_os_release_lock to return and restore a flags
parameter.  This fits better with many OS lock models.
Note: the current execution state (interrupt handler
or not) is no longer passed to these interfaces.  If
necessary, the OSL must determine this state by itself, a
simple and fast operation.  Thanks to Alexey Starikovskiy.
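
The calling pattern therefore becomes the following (a minimal
sketch; the Linux implementation appears in the osl.c hunk
below, where these calls map to spin_lock_irqsave and
spin_unlock_irqrestore):

	flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);

	/* ... critical section ... */

	acpi_os_release_lock (acpi_gbl_gpe_lock, flags);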

Fixed a problem in the ACPI table handling where a valid
XSDT was assumed to be present whenever the RSDP revision
was 2 or greater.  According to the ACPI specification,
the XSDT is optional in all cases, so the table manager
now checks for both an RSDP revision >= 2 and a non-null
XSDT pointer before using the XSDT; otherwise, the RSDT
pointer is used.  Some ACPI 2.0-compliant BIOSs contain
only the RSDT.
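
The resulting selection logic is roughly the sketch below
(field names are those of the RSDP structure; the actual
table manager code may differ):

	if ((rsdp->revision >= 2) && rsdp->xsdt_physical_address) {
		/* RSDP revision 2+ with a valid XSDT pointer: use the XSDT */
		table_address = rsdp->xsdt_physical_address;
	}
	else {
		/* Otherwise fall back to the RSDT */
		table_address = rsdp->rsdt_physical_address;
	}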

Fixed an interpreter problem with the Mid() operator for
the case of a string input where the resulting output
string has zero length.  It now correctly returns a valid,
null-terminated string object instead of a string object
with a null pointer.

Fixed a problem with the control method argument handling
to allow a store to an Arg object that already contains an
object of type Device.  The Device object is now correctly
overwritten.  Previously, an error was returned.

ACPICA 20050624:

Modified the new OSL cache interfaces to use ACPI_CACHE_T
as the type for the host-defined cache object.  This allows
the OSL to define and type this object in any manner
desired, simplifying the OSL implementation.  For example,
ACPI_CACHE_T is defined as kmem_cache_t for Linux, and
should be defined in the OS-specific header file for other
operating systems as required.
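
A minimal sketch of the Linux case, assuming the definition
lives in the Linux OS-specific header (aclinux.h):

	/* Map the host cache object type to the kernel slab cache */
	#define acpi_cache_t		kmem_cache_t

Other hosts define the type as appropriate for their native
cache object, or build with ACPI_USE_LOCAL_CACHE to keep the
original ACPI CA cache code.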

Changed the interface to acpi_os_acquire_object to return
the requested object directly as the function return value
(instead of an ACPI_STATUS).  This change was made for
performance reasons, since fast object acquisition is the
entire purpose of the interface.  acpi_os_acquire_object
is now similar to the acpi_os_allocate interface.  Thanks
to Alexey Starikovskiy.
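
In other words, callers change roughly as follows (a sketch;
the original status-based form is inferred from the
ReturnObject parameter described in the function headers):

	/* 20050617 interface: status return, object via parameter */
	status = acpi_os_acquire_object (cache, &object);

	/* 20050624 interface: object returned directly, NULL on
	 * failure, similar to acpi_os_allocate */
	object = acpi_os_acquire_object (cache);
	if (!object) {
		return_ACPI_STATUS (AE_NO_MEMORY);
	}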

Modified the initialization sequence in
acpi_initialize_subsystem to call the OSL interface
acpi_os_initialize first, before any local initialization.
This change was required because the global initialization
now calls OSL interfaces.
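
A minimal sketch of the resulting order inside
acpi_initialize_subsystem (the surrounding calls are
illustrative):

	/* Initialize the OSL first; the global initialization
	 * below now uses OSL interfaces (caches, locks, etc.) */
	status = acpi_os_initialize ();
	if (ACPI_FAILURE (status)) {
		return_ACPI_STATUS (status);
	}

	/* Local ACPI CA initialization follows */
	acpi_ut_init_globals ();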

Restructured the code base to split some files because
of size and/or because the code logically belonged in a
separate file.  New files are listed below.

  utilities/utcache.c	/* Local cache interfaces */
  utilities/utmutex.c	/* Local mutex support */
  utilities/utstate.c	/* State object support */
  parser/psloop.c	/* Main AML parse loop */

Signed-off-by: Len Brown <len.brown@intel.com>
Robert Moore 2005-06-24 00:00:00 -04:00 committed by Len Brown
parent 88ac00f5a8
commit 73459f73e5
56 changed files with 2631 additions and 2040 deletions

@ -632,23 +632,12 @@ acpi_ds_store_object_to_local (
* Weird, but true.
*/
if (opcode == AML_ARG_OP) {
/*
* Make sure that the object is the correct type. This may be
* overkill, but it is here because references were NS nodes in
* the past. Now they are operand objects of type Reference.
*/
if (ACPI_GET_DESCRIPTOR_TYPE (current_obj_desc) != ACPI_DESC_TYPE_OPERAND) {
ACPI_REPORT_ERROR ((
"Invalid descriptor type while storing to method arg: [%s]\n",
acpi_ut_get_descriptor_name (current_obj_desc)));
return_ACPI_STATUS (AE_AML_INTERNAL);
}
/*
* If we have a valid reference object that came from ref_of(),
* do the indirect store
*/
if ((current_obj_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) &&
if ((ACPI_GET_DESCRIPTOR_TYPE (current_obj_desc) == ACPI_DESC_TYPE_OPERAND) &&
(current_obj_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) &&
(current_obj_desc->reference.opcode == AML_REF_OF_OP)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"Arg (%p) is an obj_ref(Node), storing in node %p\n",

@ -50,7 +50,7 @@
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
#ifdef _ACPI_ASL_COMPILER
#ifdef ACPI_ASL_COMPILER
#include <acpi/acdisasm.h>
#endif
@ -176,7 +176,7 @@ acpi_ds_load1_begin_op (
*/
status = acpi_ns_lookup (walk_state->scope_info, path, object_type,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT, walk_state, &(node));
#ifdef _ACPI_ASL_COMPILER
#ifdef ACPI_ASL_COMPILER
if (status == AE_NOT_FOUND) {
/*
* Table disassembly:
@ -569,7 +569,7 @@ acpi_ds_load2_begin_op (
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
walk_state, &(node));
if (ACPI_FAILURE (status)) {
#ifdef _ACPI_ASL_COMPILER
#ifdef ACPI_ASL_COMPILER
if (status == AE_NOT_FOUND) {
status = AE_OK;
}

@ -681,7 +681,7 @@ acpi_ds_create_walk_state (
ACPI_FUNCTION_TRACE ("ds_create_walk_state");
walk_state = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_WALK);
walk_state = ACPI_MEM_CALLOCATE (sizeof (struct acpi_walk_state));
if (!walk_state) {
return_PTR (NULL);
}
@ -704,7 +704,7 @@ acpi_ds_create_walk_state (
status = acpi_ds_result_stack_push (walk_state);
if (ACPI_FAILURE (status)) {
acpi_ut_release_to_cache (ACPI_MEM_LIST_WALK, walk_state);
ACPI_MEM_FREE (walk_state);
return_PTR (NULL);
}
@ -900,38 +900,11 @@ acpi_ds_delete_walk_state (
acpi_ut_delete_generic_state (state);
}
acpi_ut_release_to_cache (ACPI_MEM_LIST_WALK, walk_state);
ACPI_MEM_FREE (walk_state);
return_VOID;
}
#ifdef ACPI_ENABLE_OBJECT_CACHE
/******************************************************************************
*
* FUNCTION: acpi_ds_delete_walk_state_cache
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Purge the global state object cache. Used during subsystem
* termination.
*
******************************************************************************/
void
acpi_ds_delete_walk_state_cache (
void)
{
ACPI_FUNCTION_TRACE ("ds_delete_walk_state_cache");
acpi_ut_delete_generic_cache (ACPI_MEM_LIST_WALK);
return_VOID;
}
#endif
#ifdef ACPI_OBSOLETE_FUNCTIONS
/*******************************************************************************
*

@ -396,6 +396,7 @@ acpi_ev_gpe_detect (
struct acpi_gpe_register_info *gpe_register_info;
u32 status_reg;
u32 enable_reg;
u32 flags;
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
acpi_native_uint i;
@ -412,7 +413,7 @@ acpi_ev_gpe_detect (
/* Examine all GPE blocks attached to this interrupt level */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_ISR);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
gpe_block = gpe_xrupt_list->gpe_block_list_head;
while (gpe_block) {
/*
@ -476,7 +477,7 @@ acpi_ev_gpe_detect (
unlock_and_exit:
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_ISR);
acpi_os_release_lock (acpi_gbl_gpe_lock, flags);
return (int_status);
}

@ -138,7 +138,6 @@ acpi_ev_valid_gpe_event (
* FUNCTION: acpi_ev_walk_gpe_list
*
* PARAMETERS: gpe_walk_callback - Routine called for each GPE block
* Flags - ACPI_NOT_ISR or ACPI_ISR
*
* RETURN: Status
*
@ -148,18 +147,18 @@ acpi_ev_valid_gpe_event (
acpi_status
acpi_ev_walk_gpe_list (
ACPI_GPE_CALLBACK gpe_walk_callback,
u32 flags)
ACPI_GPE_CALLBACK gpe_walk_callback)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK;
u32 flags;
ACPI_FUNCTION_TRACE ("ev_walk_gpe_list");
acpi_os_acquire_lock (acpi_gbl_gpe_lock, flags);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
/* Walk the interrupt level descriptor list */
@ -500,6 +499,7 @@ acpi_ev_get_gpe_xrupt_block (
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
acpi_status status;
u32 flags;
ACPI_FUNCTION_TRACE ("ev_get_gpe_xrupt_block");
@ -527,7 +527,7 @@ acpi_ev_get_gpe_xrupt_block (
/* Install new interrupt descriptor with spin lock */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
if (acpi_gbl_gpe_xrupt_list_head) {
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt->next) {
@ -540,7 +540,7 @@ acpi_ev_get_gpe_xrupt_block (
else {
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
}
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
acpi_os_release_lock (acpi_gbl_gpe_lock, flags);
/* Install new interrupt handler if not SCI_INT */
@ -577,6 +577,7 @@ acpi_ev_delete_gpe_xrupt (
struct acpi_gpe_xrupt_info *gpe_xrupt)
{
acpi_status status;
u32 flags;
ACPI_FUNCTION_TRACE ("ev_delete_gpe_xrupt");
@ -599,7 +600,7 @@ acpi_ev_delete_gpe_xrupt (
/* Unlink the interrupt block with lock */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
if (gpe_xrupt->previous) {
gpe_xrupt->previous->next = gpe_xrupt->next;
}
@ -607,7 +608,7 @@ acpi_ev_delete_gpe_xrupt (
if (gpe_xrupt->next) {
gpe_xrupt->next->previous = gpe_xrupt->previous;
}
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
acpi_os_release_lock (acpi_gbl_gpe_lock, flags);
/* Free the block */
@ -637,6 +638,7 @@ acpi_ev_install_gpe_block (
struct acpi_gpe_block_info *next_gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_block;
acpi_status status;
u32 flags;
ACPI_FUNCTION_TRACE ("ev_install_gpe_block");
@ -655,7 +657,7 @@ acpi_ev_install_gpe_block (
/* Install the new block at the end of the list with lock */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
if (gpe_xrupt_block->gpe_block_list_head) {
next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
while (next_gpe_block->next) {
@ -670,7 +672,7 @@ acpi_ev_install_gpe_block (
}
gpe_block->xrupt_block = gpe_xrupt_block;
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
acpi_os_release_lock (acpi_gbl_gpe_lock, flags);
unlock_and_exit:
status = acpi_ut_release_mutex (ACPI_MTX_EVENTS);
@ -695,6 +697,7 @@ acpi_ev_delete_gpe_block (
struct acpi_gpe_block_info *gpe_block)
{
acpi_status status;
u32 flags;
ACPI_FUNCTION_TRACE ("ev_install_gpe_block");
@ -720,7 +723,7 @@ acpi_ev_delete_gpe_block (
else {
/* Remove the block on this interrupt with lock */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
if (gpe_block->previous) {
gpe_block->previous->next = gpe_block->next;
}
@ -731,7 +734,7 @@ acpi_ev_delete_gpe_block (
if (gpe_block->next) {
gpe_block->next->previous = gpe_block->previous;
}
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
acpi_os_release_lock (acpi_gbl_gpe_lock, flags);
}
/* Free the gpe_block */

@ -589,7 +589,7 @@ acpi_ev_terminate (
/* Disable all GPEs in all GPE blocks */
status = acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block, ACPI_NOT_ISR);
status = acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block);
/* Remove SCI handler */
@ -602,7 +602,7 @@ acpi_ev_terminate (
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list (acpi_ev_delete_gpe_handlers, ACPI_NOT_ISR);
status = acpi_ev_walk_gpe_list (acpi_ev_delete_gpe_handlers);
/* Return to original mode if necessary */

@ -591,6 +591,7 @@ acpi_install_gpe_handler (
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
ACPI_FUNCTION_TRACE ("acpi_install_gpe_handler");
@ -643,7 +644,7 @@ acpi_install_gpe_handler (
/* Install the handler */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
gpe_event_info->dispatch.handler = handler;
/* Setup up dispatch flags to indicate handler (vs. method) */
@ -651,7 +652,7 @@ acpi_install_gpe_handler (
gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* Clear bits */
gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
acpi_os_release_lock (acpi_gbl_gpe_lock, flags);
unlock_and_exit:
@ -685,6 +686,7 @@ acpi_remove_gpe_handler (
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
ACPI_FUNCTION_TRACE ("acpi_remove_gpe_handler");
@ -741,7 +743,7 @@ acpi_remove_gpe_handler (
/* Remove the handler */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
flags = acpi_os_acquire_lock (acpi_gbl_gpe_lock);
handler = gpe_event_info->dispatch.handler;
/* Restore Method node (if any), set dispatch flags */
@ -751,7 +753,7 @@ acpi_remove_gpe_handler (
if (handler->method_node) {
gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
}
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
acpi_os_release_lock (acpi_gbl_gpe_lock, flags);
/* Now we can free the handler object */

@ -367,7 +367,7 @@ acpi_ex_convert_to_ascii (
/* hex_length: 2 ascii hex chars per data byte */
hex_length = ACPI_MUL_2 (data_width);
hex_length = (acpi_native_uint) ACPI_MUL_2 (data_width);
for (i = 0, j = (hex_length-1); i < hex_length; i++, j--) {
/* Get one hex digit, most significant digits first */

@ -80,6 +80,16 @@ acpi_ex_out_address (
acpi_physical_address value);
#endif /* ACPI_FUTURE_USAGE */
static void
acpi_ex_dump_reference (
union acpi_operand_object *obj_desc);
static void
acpi_ex_dump_package (
union acpi_operand_object *obj_desc,
u32 level,
u32 index);
/*******************************************************************************
*
@ -508,7 +518,7 @@ acpi_ex_out_integer (
char *title,
u32 value)
{
acpi_os_printf ("%20s : %X\n", title, value);
acpi_os_printf ("%20s : %.2X\n", title, value);
}
static void
@ -563,11 +573,146 @@ acpi_ex_dump_node (
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_dump_reference
*
* PARAMETERS: Object - Descriptor to dump
*
* DESCRIPTION: Dumps a reference object
*
******************************************************************************/
static void
acpi_ex_dump_reference (
union acpi_operand_object *obj_desc)
{
struct acpi_buffer ret_buf;
acpi_status status;
if (obj_desc->reference.opcode == AML_INT_NAMEPATH_OP) {
acpi_os_printf ("Named Object %p ", obj_desc->reference.node);
ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_ns_handle_to_pathname (obj_desc->reference.node, &ret_buf);
if (ACPI_FAILURE (status)) {
acpi_os_printf ("Could not convert name to pathname\n");
}
else {
acpi_os_printf ("%s\n", ret_buf.pointer);
ACPI_MEM_FREE (ret_buf.pointer);
}
}
else if (obj_desc->reference.object) {
acpi_os_printf ("\nReferenced Object: %p\n", obj_desc->reference.object);
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_dump_package
*
* PARAMETERS: Object - Descriptor to dump
* Level - Indentation Level
* Index - Package index for this object
*
* DESCRIPTION: Dumps the elements of the package
*
******************************************************************************/
static void
acpi_ex_dump_package (
union acpi_operand_object *obj_desc,
u32 level,
u32 index)
{
u32 i;
/* Indentation and index output */
if (level > 0) {
for (i = 0; i < level; i++) {
acpi_os_printf (" ");
}
acpi_os_printf ("[%.2d] ", index);
}
acpi_os_printf ("%p ", obj_desc);
/* Null package elements are allowed */
if (!obj_desc) {
acpi_os_printf ("[Null Object]\n");
return;
}
/* Packages may only contain a few object types */
switch (ACPI_GET_OBJECT_TYPE (obj_desc)) {
case ACPI_TYPE_INTEGER:
acpi_os_printf ("[Integer] = %8.8X%8.8X\n",
ACPI_FORMAT_UINT64 (obj_desc->integer.value));
break;
case ACPI_TYPE_STRING:
acpi_os_printf ("[String] Value: ");
for (i = 0; i < obj_desc->string.length; i++) {
acpi_os_printf ("%c", obj_desc->string.pointer[i]);
}
acpi_os_printf ("\n");
break;
case ACPI_TYPE_BUFFER:
acpi_os_printf ("[Buffer] Length %.2X = ", obj_desc->buffer.length);
if (obj_desc->buffer.length) {
acpi_ut_dump_buffer ((u8 *) obj_desc->buffer.pointer,
obj_desc->buffer.length, DB_DWORD_DISPLAY, _COMPONENT);
}
else {
acpi_os_printf ("\n");
}
break;
case ACPI_TYPE_PACKAGE:
acpi_os_printf ("[Package] Contains %d Elements: \n",
obj_desc->package.count);
for (i = 0; i < obj_desc->package.count; i++) {
acpi_ex_dump_package (obj_desc->package.elements[i], level+1, i);
}
break;
case ACPI_TYPE_LOCAL_REFERENCE:
acpi_os_printf ("[Object Reference] ");
acpi_ex_dump_reference (obj_desc);
break;
default:
acpi_os_printf ("[Unknown Type] %X\n", ACPI_GET_OBJECT_TYPE (obj_desc));
break;
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_dump_object_descriptor
*
* PARAMETERS: *Object - Descriptor to dump
* PARAMETERS: Object - Descriptor to dump
* Flags - Force display if TRUE
*
* DESCRIPTION: Dumps the members of the object descriptor given.
@ -579,9 +724,6 @@ acpi_ex_dump_object_descriptor (
union acpi_operand_object *obj_desc,
u32 flags)
{
u32 i;
ACPI_FUNCTION_TRACE ("ex_dump_object_descriptor");
@ -648,22 +790,13 @@ acpi_ex_dump_object_descriptor (
case ACPI_TYPE_PACKAGE:
acpi_ex_out_integer ("Flags", obj_desc->package.flags);
acpi_ex_out_integer ("Count", obj_desc->package.count);
acpi_ex_out_pointer ("Elements", obj_desc->package.elements);
acpi_ex_out_integer ("Elements", obj_desc->package.count);
acpi_ex_out_pointer ("Element List", obj_desc->package.elements);
/* Dump the package contents */
if (obj_desc->package.count > 0) {
acpi_os_printf ("\nPackage Contents:\n");
for (i = 0; i < obj_desc->package.count; i++) {
acpi_os_printf ("[%.3d] %p", i, obj_desc->package.elements[i]);
if (obj_desc->package.elements[i]) {
acpi_os_printf (" %s",
acpi_ut_get_object_type_name (obj_desc->package.elements[i]));
}
acpi_os_printf ("\n");
}
}
acpi_os_printf ("\nPackage Contents:\n");
acpi_ex_dump_package (obj_desc, 0, 0);
break;
@ -790,10 +923,7 @@ acpi_ex_dump_object_descriptor (
acpi_ex_out_pointer ("Node", obj_desc->reference.node);
acpi_ex_out_pointer ("Where", obj_desc->reference.where);
if (obj_desc->reference.object) {
acpi_os_printf ("\nReferenced Object:\n");
acpi_ex_dump_object_descriptor (obj_desc->reference.object, flags);
}
acpi_ex_dump_reference (obj_desc);
break;

@ -302,7 +302,7 @@ acpi_ex_do_concatenate (
/* Result of two Integers is a Buffer */
/* Need enough buffer space for two integers */
return_desc = acpi_ut_create_buffer_object (
return_desc = acpi_ut_create_buffer_object ((acpi_size)
ACPI_MUL_2 (acpi_gbl_integer_byte_width));
if (!return_desc) {
status = AE_NO_MEMORY;

@ -113,8 +113,9 @@ acpi_ex_opcode_0A_0T_1R (
status = AE_NO_MEMORY;
goto cleanup;
}
#if ACPI_MACHINE_WIDTH != 16
return_desc->integer.value = acpi_os_get_timer ();
#endif
break;
default: /* Unknown opcode */

@ -160,7 +160,7 @@ acpi_ex_opcode_3A_1T_1R (
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc = NULL;
char *buffer;
char *buffer = NULL;
acpi_status status = AE_OK;
acpi_integer index;
acpi_size length;
@ -193,35 +193,64 @@ acpi_ex_opcode_3A_1T_1R (
* If the index is beyond the length of the String/Buffer, or if the
* requested length is zero, return a zero-length String/Buffer
*/
if ((index < operand[0]->string.length) &&
(length > 0)) {
/* Truncate request if larger than the actual String/Buffer */
if (index >= operand[0]->string.length) {
length = 0;
}
if ((index + length) >
operand[0]->string.length) {
length = (acpi_size) operand[0]->string.length -
(acpi_size) index;
}
/* Truncate request if larger than the actual String/Buffer */
/* Allocate a new buffer for the String/Buffer */
else if ((index + length) > operand[0]->string.length) {
length = (acpi_size) operand[0]->string.length -
(acpi_size) index;
}
/* Strings always have a sub-pointer, not so for buffers */
switch (ACPI_GET_OBJECT_TYPE (operand[0])) {
case ACPI_TYPE_STRING:
/* Always allocate a new buffer for the String */
buffer = ACPI_MEM_CALLOCATE ((acpi_size) length + 1);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
}
break;
case ACPI_TYPE_BUFFER:
/* If the requested length is zero, don't allocate a buffer */
if (length > 0) {
/* Allocate a new buffer for the Buffer */
buffer = ACPI_MEM_CALLOCATE (length);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
}
}
break;
default: /* Should not happen */
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
if (length > 0) {
/* Copy the portion requested */
ACPI_MEMCPY (buffer, operand[0]->string.pointer + index,
length);
/* Set the length of the new String/Buffer */
return_desc->string.pointer = buffer;
return_desc->string.length = (u32) length;
}
/* Set the length of the new String/Buffer */
return_desc->string.pointer = buffer;
return_desc->string.length = (u32) length;
/* Mark buffer initialized */
return_desc->buffer.flags |= AOPOBJ_DATA_VALID;
@ -244,13 +273,13 @@ acpi_ex_opcode_3A_1T_1R (
/* Delete return object on error */
if (ACPI_FAILURE (status)) {
if (ACPI_FAILURE (status) || walk_state->result_obj) {
acpi_ut_remove_reference (return_desc);
}
/* Set the return object and exit */
if (!walk_state->result_obj) {
else {
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS (status);

@ -147,7 +147,7 @@ acpi_ex_do_debug_object (
case ACPI_TYPE_BUFFER:
ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]",
ACPI_DEBUG_PRINT_RAW ((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
(u32) source_desc->buffer.length));
ACPI_DUMP_BUFFER (source_desc->buffer.pointer,
(source_desc->buffer.length < 32) ? source_desc->buffer.length : 32);

@ -369,7 +369,7 @@ acpi_ex_eisa_id_to_string (
*
* RETURN: None, string
*
* DESCRIPTOIN: Convert a number to string representation. Assumes string
* DESCRIPTION: Convert a number to string representation. Assumes string
* buffer is large enough to hold the string.
*
******************************************************************************/

@ -374,7 +374,7 @@ acpi_hw_enable_wakeup_gpe_block (
*
* FUNCTION: acpi_hw_disable_all_gpes
*
* PARAMETERS: Flags - ACPI_NOT_ISR or ACPI_ISR
* PARAMETERS: None
*
* RETURN: Status
*
@ -384,7 +384,7 @@ acpi_hw_enable_wakeup_gpe_block (
acpi_status
acpi_hw_disable_all_gpes (
u32 flags)
void)
{
acpi_status status;
@ -392,8 +392,8 @@ acpi_hw_disable_all_gpes (
ACPI_FUNCTION_TRACE ("hw_disable_all_gpes");
status = acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block, flags);
status = acpi_ev_walk_gpe_list (acpi_hw_clear_gpe_block, flags);
status = acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block);
status = acpi_ev_walk_gpe_list (acpi_hw_clear_gpe_block);
return_ACPI_STATUS (status);
}
@ -402,7 +402,7 @@ acpi_hw_disable_all_gpes (
*
* FUNCTION: acpi_hw_enable_all_runtime_gpes
*
* PARAMETERS: Flags - ACPI_NOT_ISR or ACPI_ISR
* PARAMETERS: None
*
* RETURN: Status
*
@ -412,7 +412,7 @@ acpi_hw_disable_all_gpes (
acpi_status
acpi_hw_enable_all_runtime_gpes (
u32 flags)
void)
{
acpi_status status;
@ -420,7 +420,7 @@ acpi_hw_enable_all_runtime_gpes (
ACPI_FUNCTION_TRACE ("hw_enable_all_runtime_gpes");
status = acpi_ev_walk_gpe_list (acpi_hw_enable_runtime_gpe_block, flags);
status = acpi_ev_walk_gpe_list (acpi_hw_enable_runtime_gpe_block);
return_ACPI_STATUS (status);
}
@ -429,7 +429,7 @@ acpi_hw_enable_all_runtime_gpes (
*
* FUNCTION: acpi_hw_enable_all_wakeup_gpes
*
* PARAMETERS: Flags - ACPI_NOT_ISR or ACPI_ISR
* PARAMETERS: None
*
* RETURN: Status
*
@ -439,7 +439,7 @@ acpi_hw_enable_all_runtime_gpes (
acpi_status
acpi_hw_enable_all_wakeup_gpes (
u32 flags)
void)
{
acpi_status status;
@ -447,7 +447,7 @@ acpi_hw_enable_all_wakeup_gpes (
ACPI_FUNCTION_TRACE ("hw_enable_all_wakeup_gpes");
status = acpi_ev_walk_gpe_list (acpi_hw_enable_wakeup_gpe_block, flags);
status = acpi_ev_walk_gpe_list (acpi_hw_enable_wakeup_gpe_block);
return_ACPI_STATUS (status);
}

@ -106,7 +106,7 @@ acpi_hw_clear_acpi_status (
/* Clear the GPE Bits in all GPE registers in all GPE blocks */
status = acpi_ev_walk_gpe_list (acpi_hw_clear_gpe_block, ACPI_ISR);
status = acpi_ev_walk_gpe_list (acpi_hw_clear_gpe_block);
unlock_and_exit:
if (flags & ACPI_MTX_LOCK) {

@ -274,13 +274,13 @@ acpi_enter_sleep_state (
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_disable_all_gpes (ACPI_ISR);
status = acpi_hw_disable_all_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes (ACPI_ISR);
status = acpi_hw_enable_all_wakeup_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
@ -424,13 +424,13 @@ acpi_enter_sleep_state_s4bios (
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_disable_all_gpes (ACPI_ISR);
status = acpi_hw_disable_all_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes (ACPI_ISR);
status = acpi_hw_enable_all_wakeup_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
@ -557,13 +557,13 @@ acpi_leave_sleep_state (
* 1) Disable/Clear all GPEs
* 2) Enable all runtime GPEs
*/
status = acpi_hw_disable_all_gpes (ACPI_NOT_ISR);
status = acpi_hw_disable_all_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
acpi_gbl_system_awake_and_running = TRUE;
status = acpi_hw_enable_all_runtime_gpes (ACPI_NOT_ISR);
status = acpi_hw_enable_all_runtime_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}

@ -159,7 +159,7 @@ acpi_ns_root_initialize (
obj_desc->method.param_count = (u8) ACPI_TO_INTEGER (val);
obj_desc->common.flags |= AOPOBJ_DATA_VALID;
#if defined (_ACPI_ASL_COMPILER) || defined (_ACPI_DUMP_App)
#if defined (ACPI_ASL_COMPILER) || defined (ACPI_DUMP_App)
/*
* i_aSL Compiler cheats by putting parameter count

@ -83,7 +83,7 @@ acpi_ns_create_node (
return_PTR (NULL);
}
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].total_allocated++);
ACPI_MEM_TRACKING (acpi_gbl_ns_node_list->total_allocated++);
node->name.integer = name;
node->reference_count = 1;
@ -151,7 +151,7 @@ acpi_ns_delete_node (
}
}
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].total_freed++);
ACPI_MEM_TRACKING (acpi_gbl_ns_node_list->total_freed++);
/*
* Detach an object if there is one then delete the node
@ -362,7 +362,7 @@ acpi_ns_delete_children (
/* Now we can free this child object */
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].total_freed++);
ACPI_MEM_TRACKING (acpi_gbl_ns_node_list->total_freed++);
ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "Object %p, Remaining %X\n",
child_node, acpi_gbl_current_node_count));

@ -208,33 +208,37 @@ acpi_ns_dump_one_object (
return (AE_OK);
}
/* Indent the object according to the level */
if (!(info->display_type & ACPI_DISPLAY_SHORT)) {
/* Indent the object according to the level */
acpi_os_printf ("%2d%*s", (u32) level - 1, (int) level * 2, " ");
acpi_os_printf ("%2d%*s", (u32) level - 1, (int) level * 2, " ");
/* Check the node type and name */
/* Check the node type and name */
if (type > ACPI_TYPE_LOCAL_MAX) {
ACPI_REPORT_WARNING (("Invalid ACPI Type %08X\n", type));
}
if (type > ACPI_TYPE_LOCAL_MAX) {
ACPI_REPORT_WARNING (("Invalid ACPI Type %08X\n", type));
}
if (!acpi_ut_valid_acpi_name (this_node->name.integer)) {
ACPI_REPORT_WARNING (("Invalid ACPI Name %08X\n",
this_node->name.integer));
if (!acpi_ut_valid_acpi_name (this_node->name.integer)) {
ACPI_REPORT_WARNING (("Invalid ACPI Name %08X\n",
this_node->name.integer));
}
acpi_os_printf ("%4.4s", acpi_ut_get_node_name (this_node));
}
/*
* Now we can print out the pertinent information
*/
acpi_os_printf ("%4.4s %-12s %p ",
acpi_ut_get_node_name (this_node), acpi_ut_get_type_name (type), this_node);
acpi_os_printf (" %-12s %p ",
acpi_ut_get_type_name (type), this_node);
dbg_level = acpi_dbg_level;
acpi_dbg_level = 0;
obj_desc = acpi_ns_get_attached_object (this_node);
acpi_dbg_level = dbg_level;
switch (info->display_type) {
switch (info->display_type & ACPI_DISPLAY_MASK) {
case ACPI_DISPLAY_SUMMARY:
if (!obj_desc) {
@ -646,7 +650,7 @@ acpi_ns_dump_entry (
}
#ifdef _ACPI_ASL_COMPILER
#ifdef ACPI_ASL_COMPILER
/*******************************************************************************
*
* FUNCTION: acpi_ns_dump_tables

@ -778,54 +778,6 @@ acpi_os_delete_lock (
return_VOID;
}
/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
* flags is *not* the result of save_flags - it is an ACPI-specific flag variable
* that indicates whether we are at interrupt level.
*/
void
acpi_os_acquire_lock (
acpi_handle handle,
u32 flags)
{
ACPI_FUNCTION_TRACE ("os_acquire_lock");
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Acquiring spinlock[%p] from %s level\n", handle,
((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));
if (flags & ACPI_NOT_ISR)
ACPI_DISABLE_IRQS();
spin_lock((spinlock_t *)handle);
return_VOID;
}
/*
* Release a spinlock. See above.
*/
void
acpi_os_release_lock (
acpi_handle handle,
u32 flags)
{
ACPI_FUNCTION_TRACE ("os_release_lock");
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Releasing spinlock[%p] from %s level\n", handle,
((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));
spin_unlock((spinlock_t *)handle);
if (flags & ACPI_NOT_ISR)
ACPI_ENABLE_IRQS();
return_VOID;
}
acpi_status
acpi_os_create_semaphore(
u32 max_units,
@ -1172,3 +1124,151 @@ unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;
EXPORT_SYMBOL(max_cstate);
/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
* flags is *not* the result of save_flags - it is an ACPI-specific flag variable
* that indicates whether we are at interrupt level.
*/
unsigned long
acpi_os_acquire_lock (
acpi_handle handle)
{
unsigned long flags;
spin_lock_irqsave((spinlock_t *)handle, flags);
return flags;
}
/*
* Release a spinlock. See above.
*/
void
acpi_os_release_lock (
acpi_handle handle,
unsigned long flags)
{
spin_unlock_irqrestore((spinlock_t *)handle, flags);
}
#ifndef ACPI_USE_LOCAL_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_os_create_cache
*
* PARAMETERS: CacheName - Ascii name for the cache
* ObjectSize - Size of each cached object
* MaxDepth - Maximum depth of the cache (in objects)
* ReturnCache - Where the new cache object is returned
*
* RETURN: Status
*
* DESCRIPTION: Create a cache object
*
******************************************************************************/
acpi_status
acpi_os_create_cache (
char *name,
u16 size,
u16 depth,
acpi_cache_t **cache)
{
*cache = kmem_cache_create (name, size, 0, 0, NULL, NULL);
return AE_OK;
}
/*******************************************************************************
*
* FUNCTION: acpi_os_purge_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache.
*
******************************************************************************/
acpi_status
acpi_os_purge_cache (
acpi_cache_t *cache)
{
(void) kmem_cache_shrink(cache);
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_delete_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache and delete the
* cache object.
*
******************************************************************************/
acpi_status
acpi_os_delete_cache (
acpi_cache_t *cache)
{
(void)kmem_cache_destroy(cache);
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_release_object
*
* PARAMETERS: Cache - Handle to cache object
* Object - The object to be released
*
* RETURN: None
*
* DESCRIPTION: Release an object to the specified cache. If cache is full,
* the object is deleted.
*
******************************************************************************/
acpi_status
acpi_os_release_object (
acpi_cache_t *cache,
void *object)
{
kmem_cache_free(cache, object);
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_acquire_object
*
* PARAMETERS: Cache - Handle to cache object
* ReturnObject - Where the object is returned
*
* RETURN: Status
*
* DESCRIPTION: Get an object from the specified cache. If cache is empty,
* the object is allocated.
*
******************************************************************************/
void *
acpi_os_acquire_object (
acpi_cache_t *cache)
{
void *object = kmem_cache_alloc(cache, GFP_KERNEL);
WARN_ON(!object);
return object;
}
#endif

@ -2,7 +2,7 @@
# Makefile for all Linux ACPI interpreter subdirectories
#
obj-y := psargs.o psparse.o pstree.o pswalk.o \
obj-y := psargs.o psparse.o psloop.o pstree.o pswalk.o \
psopcode.o psscope.o psutils.o psxface.o
EXTRA_CFLAGS += $(ACPI_CFLAGS)

@ -0,0 +1,775 @@
/******************************************************************************
*
* Module Name: psloop - Main AML parse loop
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
/*
* Parse the AML and build an operation tree as most interpreters,
* like Perl, do. Parsing is done by hand rather than with a YACC
* generated parser to tightly constrain stack and dynamic memory
* usage. At the same time, parsing is kept flexible and the code
* fairly compact by parsing based on a list of AML opcode
* templates in aml_op_info[]
*/
#include <acpi/acpi.h>
#include <acpi/acparser.h>
#include <acpi/acdispat.h>
#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
#include <acpi/acinterp.h>
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME ("psloop")
static u32 acpi_gbl_depth = 0;
/*******************************************************************************
*
* FUNCTION: acpi_ps_parse_loop
*
* PARAMETERS: walk_state - Current state
*
* RETURN: Status
*
* DESCRIPTION: Parse AML (pointed to by the current parser state) and return
* a tree of ops.
*
******************************************************************************/
acpi_status
acpi_ps_parse_loop (
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
acpi_status status2;
union acpi_parse_object *op = NULL; /* current op */
union acpi_parse_object *arg = NULL;
union acpi_parse_object *pre_op = NULL;
struct acpi_parse_state *parser_state;
u8 *aml_op_start = NULL;
ACPI_FUNCTION_TRACE_PTR ("ps_parse_loop", walk_state);
if (walk_state->descending_callback == NULL) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
parser_state = &walk_state->parser_state;
walk_state->arg_types = 0;
#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
/* We are restarting a preempted control method */
if (acpi_ps_has_completed_scope (parser_state)) {
/*
* We must check if a predicate to an IF or WHILE statement
* was just completed
*/
if ((parser_state->scope->parse_scope.op) &&
((parser_state->scope->parse_scope.op->common.aml_opcode == AML_IF_OP) ||
(parser_state->scope->parse_scope.op->common.aml_opcode == AML_WHILE_OP)) &&
(walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_PREDICATE_EXECUTING)) {
/*
* A predicate was just completed, get the value of the
* predicate and branch based on that value
*/
walk_state->op = NULL;
status = acpi_ds_get_predicate_value (walk_state, ACPI_TO_POINTER (TRUE));
if (ACPI_FAILURE (status) &&
((status & AE_CODE_MASK) != AE_CODE_CONTROL)) {
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Invoked method did not return a value, %s\n",
acpi_format_exception (status)));
}
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"get_predicate Failed, %s\n",
acpi_format_exception (status)));
return_ACPI_STATUS (status);
}
status = acpi_ps_next_parse_state (walk_state, op, status);
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Popped scope, Op=%p\n", op));
}
else if (walk_state->prev_op) {
/* We were in the middle of an op */
op = walk_state->prev_op;
walk_state->arg_types = walk_state->prev_arg_types;
}
}
#endif
/* Iterative parsing loop, while there is more AML to process: */
while ((parser_state->aml < parser_state->aml_end) || (op)) {
aml_op_start = parser_state->aml;
if (!op) {
/* Get the next opcode from the AML stream */
walk_state->aml_offset = (u32) ACPI_PTR_DIFF (parser_state->aml,
parser_state->aml_start);
walk_state->opcode = acpi_ps_peek_opcode (parser_state);
/*
* First cut to determine what we have found:
* 1) A valid AML opcode
* 2) A name string
* 3) An unknown/invalid opcode
*/
walk_state->op_info = acpi_ps_get_opcode_info (walk_state->opcode);
switch (walk_state->op_info->class) {
case AML_CLASS_ASCII:
case AML_CLASS_PREFIX:
/*
* Starts with a valid prefix or ASCII char, this is a name
* string. Convert the bare name string to a namepath.
*/
walk_state->opcode = AML_INT_NAMEPATH_OP;
walk_state->arg_types = ARGP_NAMESTRING;
break;
case AML_CLASS_UNKNOWN:
/* The opcode is unrecognized. Just skip unknown opcodes */
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Found unknown opcode %X at AML address %p offset %X, ignoring\n",
walk_state->opcode, parser_state->aml, walk_state->aml_offset));
ACPI_DUMP_BUFFER (parser_state->aml, 128);
/* Assume one-byte bad opcode */
parser_state->aml++;
continue;
default:
/* Found opcode info, this is a normal opcode */
parser_state->aml += acpi_ps_get_opcode_size (walk_state->opcode);
walk_state->arg_types = walk_state->op_info->parse_args;
break;
}
/* Create Op structure and append to parent's argument list */
if (walk_state->op_info->flags & AML_NAMED) {
/* Allocate a new pre_op if necessary */
if (!pre_op) {
pre_op = acpi_ps_alloc_op (walk_state->opcode);
if (!pre_op) {
status = AE_NO_MEMORY;
goto close_this_op;
}
}
pre_op->common.value.arg = NULL;
pre_op->common.aml_opcode = walk_state->opcode;
/*
* Get and append arguments until we find the node that contains
* the name (the type ARGP_NAME).
*/
while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) &&
(GET_CURRENT_ARG_TYPE (walk_state->arg_types) != ARGP_NAME)) {
status = acpi_ps_get_next_arg (walk_state, parser_state,
GET_CURRENT_ARG_TYPE (walk_state->arg_types), &arg);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
acpi_ps_append_arg (pre_op, arg);
INCREMENT_ARG_LIST (walk_state->arg_types);
}
/*
* Make sure that we found a NAME and didn't run out of
* arguments
*/
if (!GET_CURRENT_ARG_TYPE (walk_state->arg_types)) {
status = AE_AML_NO_OPERAND;
goto close_this_op;
}
/* We know that this arg is a name, move to next arg */
INCREMENT_ARG_LIST (walk_state->arg_types);
/*
* Find the object. This will either insert the object into
* the namespace or simply look it up
*/
walk_state->op = NULL;
status = walk_state->descending_callback (walk_state, &op);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"During name lookup/catalog, %s\n",
acpi_format_exception (status)));
goto close_this_op;
}
if (!op) {
continue;
}
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
acpi_ps_append_arg (op, pre_op->common.value.arg);
acpi_gbl_depth++;
if (op->common.aml_opcode == AML_REGION_OP) {
/*
* Defer final parsing of an operation_region body,
* because we don't have enough info in the first pass
* to parse it correctly (i.e., there may be method
* calls within the term_arg elements of the body.)
*
* However, we must continue parsing because
* the opregion is not a standalone package --
* we don't know where the end is at this point.
*
* (Length is unknown until parse of the body complete)
*/
op->named.data = aml_op_start;
op->named.length = 0;
}
}
else {
/* Not a named opcode, just allocate Op and append to parent */
walk_state->op_info = acpi_ps_get_opcode_info (walk_state->opcode);
op = acpi_ps_alloc_op (walk_state->opcode);
if (!op) {
status = AE_NO_MEMORY;
goto close_this_op;
}
if (walk_state->op_info->flags & AML_CREATE) {
/*
* Backup to beginning of create_xXXfield declaration
* body_length is unknown until we parse the body
*/
op->named.data = aml_op_start;
op->named.length = 0;
}
acpi_ps_append_arg (acpi_ps_get_parent_scope (parser_state), op);
if ((walk_state->descending_callback != NULL)) {
/*
* Find the object. This will either insert the object into
* the namespace or simply look it up
*/
walk_state->op = op;
status = walk_state->descending_callback (walk_state, &op);
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
}
}
op->common.aml_offset = walk_state->aml_offset;
if (walk_state->op_info) {
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE,
"Opcode %4.4X [%s] Op %p Aml %p aml_offset %5.5X\n",
(u32) op->common.aml_opcode, walk_state->op_info->name,
op, parser_state->aml, op->common.aml_offset));
}
}
/*
* Start arg_count at zero because we don't know if there are
* any args yet
*/
walk_state->arg_count = 0;
/* Are there any arguments that must be processed? */
if (walk_state->arg_types) {
/* Get arguments */
switch (op->common.aml_opcode) {
case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
case AML_WORD_OP: /* AML_WORDDATA_ARG */
case AML_DWORD_OP: /* AML_DWORDATA_ARG */
case AML_QWORD_OP: /* AML_QWORDATA_ARG */
case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
/* Fill in constant or string argument directly */
acpi_ps_get_next_simple_arg (parser_state,
GET_CURRENT_ARG_TYPE (walk_state->arg_types), op);
break;
case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
status = acpi_ps_get_next_namepath (walk_state, parser_state, op, 1);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
walk_state->arg_types = 0;
break;
default:
/*
* Op is not a constant or string, append each argument
* to the Op
*/
while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) &&
!walk_state->arg_count) {
walk_state->aml_offset = (u32)
ACPI_PTR_DIFF (parser_state->aml, parser_state->aml_start);
status = acpi_ps_get_next_arg (walk_state, parser_state,
GET_CURRENT_ARG_TYPE (walk_state->arg_types),
&arg);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
if (arg) {
arg->common.aml_offset = walk_state->aml_offset;
acpi_ps_append_arg (op, arg);
}
INCREMENT_ARG_LIST (walk_state->arg_types);
}
/* Special processing for certain opcodes */
if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
/*
* We want to skip If/Else/While constructs during Pass1
* because we want to actually conditionally execute the
* code during Pass2.
*
* Except for disassembly, where we always want to
* walk the If/Else/While packages
*/
switch (op->common.aml_opcode) {
case AML_IF_OP:
case AML_ELSE_OP:
case AML_WHILE_OP:
/* Skip body of if/else/while in pass 1 */
parser_state->aml = parser_state->pkg_end;
walk_state->arg_count = 0;
break;
default:
break;
}
}
switch (op->common.aml_opcode) {
case AML_METHOD_OP:
/*
* Skip parsing of control method
* because we don't have enough info in the first pass
* to parse it correctly.
*
* Save the length and address of the body
*/
op->named.data = parser_state->aml;
op->named.length = (u32) (parser_state->pkg_end -
parser_state->aml);
/* Skip body of method */
parser_state->aml = parser_state->pkg_end;
walk_state->arg_count = 0;
break;
case AML_BUFFER_OP:
case AML_PACKAGE_OP:
case AML_VAR_PACKAGE_OP:
if ((op->common.parent) &&
(op->common.parent->common.aml_opcode == AML_NAME_OP) &&
(walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2)) {
/*
* Skip parsing of Buffers and Packages
* because we don't have enough info in the first pass
* to parse them correctly.
*/
op->named.data = aml_op_start;
op->named.length = (u32) (parser_state->pkg_end -
aml_op_start);
/* Skip body */
parser_state->aml = parser_state->pkg_end;
walk_state->arg_count = 0;
}
break;
case AML_WHILE_OP:
if (walk_state->control_state) {
walk_state->control_state->control.package_end =
parser_state->pkg_end;
}
break;
default:
/* No action for all other opcodes */
break;
}
break;
}
}
/* Check for arguments that need to be processed */
if (walk_state->arg_count) {
/*
* There are arguments (complex ones), push Op and
* prepare for argument
*/
status = acpi_ps_push_scope (parser_state, op,
walk_state->arg_types, walk_state->arg_count);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
op = NULL;
continue;
}
/*
* All arguments have been processed -- Op is complete,
* prepare for next
*/
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
if (walk_state->op_info->flags & AML_NAMED) {
if (acpi_gbl_depth) {
acpi_gbl_depth--;
}
if (op->common.aml_opcode == AML_REGION_OP) {
/*
* Skip parsing of control method or opregion body,
* because we don't have enough info in the first pass
* to parse them correctly.
*
* Completed parsing an op_region declaration, we now
* know the length.
*/
op->named.length = (u32) (parser_state->aml - op->named.data);
}
}
if (walk_state->op_info->flags & AML_CREATE) {
/*
* Backup to beginning of create_xXXfield declaration (1 for
* Opcode)
*
* body_length is unknown until we parse the body
*/
op->named.length = (u32) (parser_state->aml - op->named.data);
}
/* This op complete, notify the dispatcher */
if (walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
}
close_this_op:
/*
* Finished one argument of the containing scope
*/
parser_state->scope->parse_scope.arg_count--;
/* Finished with pre_op */
if (pre_op) {
acpi_ps_free_op (pre_op);
pre_op = NULL;
}
/* Close this Op (will result in parse subtree deletion) */
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
op = NULL;
switch (status) {
case AE_OK:
break;
case AE_CTRL_TRANSFER:
/* We are about to transfer to a called method. */
walk_state->prev_op = op;
walk_state->prev_arg_types = walk_state->arg_types;
return_ACPI_STATUS (status);
case AE_CTRL_END:
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
if (op) {
walk_state->op = op;
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
op = NULL;
}
status = AE_OK;
break;
case AE_CTRL_BREAK:
case AE_CTRL_CONTINUE:
/* Pop off scopes until we find the While */
while (!op || (op->common.aml_opcode != AML_WHILE_OP)) {
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
}
/* Close this iteration of the While loop */
walk_state->op = op;
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
op = NULL;
status = AE_OK;
break;
case AE_CTRL_TERMINATE:
status = AE_OK;
/* Clean up */
do {
if (op) {
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
} while (op);
return_ACPI_STATUS (status);
default: /* All other non-AE_OK status */
do {
if (op) {
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
} while (op);
/*
* TBD: Cleanup parse ops on error
*/
#if 0
if (op == NULL) {
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
}
#endif
walk_state->prev_op = op;
walk_state->prev_arg_types = walk_state->arg_types;
return_ACPI_STATUS (status);
}
/* This scope complete? */
if (acpi_ps_has_completed_scope (parser_state)) {
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Popped scope, Op=%p\n", op));
}
else {
op = NULL;
}
} /* while parser_state->Aml */
/*
* Complete the last Op (if not completed), and clear the scope stack.
* It is easily possible to end an AML "package" with an unbounded number
* of open scopes (such as when several ASL blocks are closed with
* sequential closing braces). We want to terminate each one cleanly.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "AML package complete at Op %p\n", op));
do {
if (op) {
if (walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
if (status == AE_CTRL_TERMINATE) {
status = AE_OK;
/* Clean up */
do {
if (op) {
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
} while (op);
return_ACPI_STATUS (status);
}
else if (ACPI_FAILURE (status)) {
/* First error is most important */
(void) acpi_ps_complete_this_op (walk_state, op);
return_ACPI_STATUS (status);
}
}
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types,
&walk_state->arg_count);
} while (op);
return_ACPI_STATUS (status);
}

@ -428,33 +428,23 @@ acpi_ps_get_opcode_info (
/*
* Detect normal 8-bit opcode or extended 16-bit opcode
*/
switch ((u8) (opcode >> 8)) {
case 0:
if (!(opcode & 0xFF00)) {
/* Simple (8-bit) opcode: 0-255, can't index beyond table */
return (&acpi_gbl_aml_op_info [acpi_gbl_short_op_index [(u8) opcode]]);
case AML_EXTOP:
/* Extended (16-bit, prefix+opcode) opcode */
if (((u8) opcode) <= MAX_EXTENDED_OPCODE) {
return (&acpi_gbl_aml_op_info [acpi_gbl_long_op_index [(u8) opcode]]);
}
/* Else fall through to error case below */
/*lint -fallthrough */
default:
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Unknown AML opcode [%4.4X]\n", opcode));
break;
}
if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
(((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
/* Valid extended (16-bit) opcode */
/* Default is "unknown opcode" */
return (&acpi_gbl_aml_op_info [acpi_gbl_long_op_index [(u8) opcode]]);
}
/* Unknown AML opcode */
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Unknown AML opcode [%4.4X]\n", opcode));
return (&acpi_gbl_aml_op_info [_UNK]);
}

@ -62,26 +62,6 @@
ACPI_MODULE_NAME ("psparse")
static u32 acpi_gbl_depth = 0;
/* Local prototypes */
static acpi_status
acpi_ps_complete_this_op (
struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
static acpi_status
acpi_ps_next_parse_state (
struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
acpi_status callback_status);
static acpi_status
acpi_ps_parse_loop (
struct acpi_walk_state *walk_state);
/*******************************************************************************
*
* FUNCTION: acpi_ps_get_opcode_size
@ -134,8 +114,8 @@ acpi_ps_peek_opcode (
aml = parser_state->aml;
opcode = (u16) ACPI_GET8 (aml);
if (opcode == AML_EXTOP) {
/* Extended opcode */
if (opcode == AML_EXTENDED_OP_PREFIX) {
/* Extended opcode, get the second opcode byte */
aml++;
opcode = (u16) ((opcode << 8) | ACPI_GET8 (aml));
@ -158,7 +138,7 @@ acpi_ps_peek_opcode (
*
******************************************************************************/
static acpi_status
acpi_status
acpi_ps_complete_this_op (
struct acpi_walk_state *walk_state,
union acpi_parse_object *op)
@ -331,7 +311,7 @@ acpi_ps_complete_this_op (
*
******************************************************************************/
static acpi_status
acpi_status
acpi_ps_next_parse_state (
struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
@ -439,706 +419,6 @@ acpi_ps_next_parse_state (
}
/*******************************************************************************
*
* FUNCTION: acpi_ps_parse_loop
*
* PARAMETERS: walk_state - Current state
*
* RETURN: Status
*
* DESCRIPTION: Parse AML (pointed to by the current parser state) and return
* a tree of ops.
*
******************************************************************************/
static acpi_status
acpi_ps_parse_loop (
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
acpi_status status2;
union acpi_parse_object *op = NULL; /* current op */
union acpi_parse_object *arg = NULL;
union acpi_parse_object *pre_op = NULL;
struct acpi_parse_state *parser_state;
u8 *aml_op_start = NULL;
ACPI_FUNCTION_TRACE_PTR ("ps_parse_loop", walk_state);
if (walk_state->descending_callback == NULL) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
parser_state = &walk_state->parser_state;
walk_state->arg_types = 0;
#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
/* We are restarting a preempted control method */
if (acpi_ps_has_completed_scope (parser_state)) {
/*
* We must check if a predicate to an IF or WHILE statement
* was just completed
*/
if ((parser_state->scope->parse_scope.op) &&
((parser_state->scope->parse_scope.op->common.aml_opcode == AML_IF_OP) ||
(parser_state->scope->parse_scope.op->common.aml_opcode == AML_WHILE_OP)) &&
(walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_PREDICATE_EXECUTING)) {
/*
* A predicate was just completed, get the value of the
* predicate and branch based on that value
*/
walk_state->op = NULL;
status = acpi_ds_get_predicate_value (walk_state, ACPI_TO_POINTER (TRUE));
if (ACPI_FAILURE (status) &&
((status & AE_CODE_MASK) != AE_CODE_CONTROL)) {
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Invoked method did not return a value, %s\n",
acpi_format_exception (status)));
}
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"get_predicate Failed, %s\n",
acpi_format_exception (status)));
return_ACPI_STATUS (status);
}
status = acpi_ps_next_parse_state (walk_state, op, status);
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Popped scope, Op=%p\n", op));
}
else if (walk_state->prev_op) {
/* We were in the middle of an op */
op = walk_state->prev_op;
walk_state->arg_types = walk_state->prev_arg_types;
}
}
#endif
/* Iterative parsing loop, while there is more AML to process: */
while ((parser_state->aml < parser_state->aml_end) || (op)) {
aml_op_start = parser_state->aml;
if (!op) {
/* Get the next opcode from the AML stream */
walk_state->aml_offset = (u32) ACPI_PTR_DIFF (parser_state->aml,
parser_state->aml_start);
walk_state->opcode = acpi_ps_peek_opcode (parser_state);
/*
* First cut to determine what we have found:
* 1) A valid AML opcode
* 2) A name string
* 3) An unknown/invalid opcode
*/
walk_state->op_info = acpi_ps_get_opcode_info (walk_state->opcode);
switch (walk_state->op_info->class) {
case AML_CLASS_ASCII:
case AML_CLASS_PREFIX:
/*
* Starts with a valid prefix or ASCII char, this is a name
* string. Convert the bare name string to a namepath.
*/
walk_state->opcode = AML_INT_NAMEPATH_OP;
walk_state->arg_types = ARGP_NAMESTRING;
break;
case AML_CLASS_UNKNOWN:
/* The opcode is unrecognized. Just skip unknown opcodes */
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Found unknown opcode %X at AML address %p offset %X, ignoring\n",
walk_state->opcode, parser_state->aml, walk_state->aml_offset));
ACPI_DUMP_BUFFER (parser_state->aml, 128);
/* Assume one-byte bad opcode */
parser_state->aml++;
continue;
default:
/* Found opcode info, this is a normal opcode */
parser_state->aml += acpi_ps_get_opcode_size (walk_state->opcode);
walk_state->arg_types = walk_state->op_info->parse_args;
break;
}
/* Create Op structure and append to parent's argument list */
if (walk_state->op_info->flags & AML_NAMED) {
/* Allocate a new pre_op if necessary */
if (!pre_op) {
pre_op = acpi_ps_alloc_op (walk_state->opcode);
if (!pre_op) {
status = AE_NO_MEMORY;
goto close_this_op;
}
}
pre_op->common.value.arg = NULL;
pre_op->common.aml_opcode = walk_state->opcode;
/*
* Get and append arguments until we find the node that contains
* the name (the type ARGP_NAME).
*/
while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) &&
(GET_CURRENT_ARG_TYPE (walk_state->arg_types) != ARGP_NAME)) {
status = acpi_ps_get_next_arg (walk_state, parser_state,
GET_CURRENT_ARG_TYPE (walk_state->arg_types), &arg);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
acpi_ps_append_arg (pre_op, arg);
INCREMENT_ARG_LIST (walk_state->arg_types);
}
/*
* Make sure that we found a NAME and didn't run out of
* arguments
*/
if (!GET_CURRENT_ARG_TYPE (walk_state->arg_types)) {
status = AE_AML_NO_OPERAND;
goto close_this_op;
}
/* We know that this arg is a name, move to next arg */
INCREMENT_ARG_LIST (walk_state->arg_types);
/*
* Find the object. This will either insert the object into
* the namespace or simply look it up
*/
walk_state->op = NULL;
status = walk_state->descending_callback (walk_state, &op);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"During name lookup/catalog, %s\n",
acpi_format_exception (status)));
goto close_this_op;
}
if (!op) {
continue;
}
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
acpi_ps_append_arg (op, pre_op->common.value.arg);
acpi_gbl_depth++;
if (op->common.aml_opcode == AML_REGION_OP) {
/*
* Defer final parsing of an operation_region body,
* because we don't have enough info in the first pass
* to parse it correctly (i.e., there may be method
* calls within the term_arg elements of the body.)
*
* However, we must continue parsing because
* the opregion is not a standalone package --
* we don't know where the end is at this point.
*
* (Length is unknown until parse of the body complete)
*/
op->named.data = aml_op_start;
op->named.length = 0;
}
}
else {
/* Not a named opcode, just allocate Op and append to parent */
walk_state->op_info = acpi_ps_get_opcode_info (walk_state->opcode);
op = acpi_ps_alloc_op (walk_state->opcode);
if (!op) {
status = AE_NO_MEMORY;
goto close_this_op;
}
if (walk_state->op_info->flags & AML_CREATE) {
/*
* Backup to beginning of create_xXXfield declaration
* body_length is unknown until we parse the body
*/
op->named.data = aml_op_start;
op->named.length = 0;
}
acpi_ps_append_arg (acpi_ps_get_parent_scope (parser_state), op);
if ((walk_state->descending_callback != NULL)) {
/*
* Find the object. This will either insert the object into
* the namespace or simply look it up
*/
walk_state->op = op;
status = walk_state->descending_callback (walk_state, &op);
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
}
}
op->common.aml_offset = walk_state->aml_offset;
if (walk_state->op_info) {
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE,
"Opcode %4.4X [%s] Op %p Aml %p aml_offset %5.5X\n",
(u32) op->common.aml_opcode, walk_state->op_info->name,
op, parser_state->aml, op->common.aml_offset));
}
}
/*
* Start arg_count at zero because we don't know if there are
* any args yet
*/
walk_state->arg_count = 0;
/* Are there any arguments that must be processed? */
if (walk_state->arg_types) {
/* Get arguments */
switch (op->common.aml_opcode) {
case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
case AML_WORD_OP: /* AML_WORDDATA_ARG */
case AML_DWORD_OP: /* AML_DWORDATA_ARG */
case AML_QWORD_OP: /* AML_QWORDATA_ARG */
case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
/* Fill in constant or string argument directly */
acpi_ps_get_next_simple_arg (parser_state,
GET_CURRENT_ARG_TYPE (walk_state->arg_types), op);
break;
case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
status = acpi_ps_get_next_namepath (walk_state, parser_state, op, 1);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
walk_state->arg_types = 0;
break;
default:
/*
* Op is not a constant or string, append each argument
* to the Op
*/
while (GET_CURRENT_ARG_TYPE (walk_state->arg_types) &&
!walk_state->arg_count) {
walk_state->aml_offset = (u32)
ACPI_PTR_DIFF (parser_state->aml, parser_state->aml_start);
status = acpi_ps_get_next_arg (walk_state, parser_state,
GET_CURRENT_ARG_TYPE (walk_state->arg_types),
&arg);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
if (arg) {
arg->common.aml_offset = walk_state->aml_offset;
acpi_ps_append_arg (op, arg);
}
INCREMENT_ARG_LIST (walk_state->arg_types);
}
/* Special processing for certain opcodes */
if (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) {
switch (op->common.aml_opcode) {
case AML_IF_OP:
case AML_ELSE_OP:
case AML_WHILE_OP:
/* Skip body of if/else/while in pass 1 */
parser_state->aml = parser_state->pkg_end;
walk_state->arg_count = 0;
break;
default:
break;
}
}
switch (op->common.aml_opcode) {
case AML_METHOD_OP:
/*
* Skip parsing of control method
* because we don't have enough info in the first pass
* to parse it correctly.
*
* Save the length and address of the body
*/
op->named.data = parser_state->aml;
op->named.length = (u32) (parser_state->pkg_end -
parser_state->aml);
/* Skip body of method */
parser_state->aml = parser_state->pkg_end;
walk_state->arg_count = 0;
break;
case AML_BUFFER_OP:
case AML_PACKAGE_OP:
case AML_VAR_PACKAGE_OP:
if ((op->common.parent) &&
(op->common.parent->common.aml_opcode == AML_NAME_OP) &&
(walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2)) {
/*
* Skip parsing of Buffers and Packages
* because we don't have enough info in the first pass
* to parse them correctly.
*/
op->named.data = aml_op_start;
op->named.length = (u32) (parser_state->pkg_end -
aml_op_start);
/* Skip body */
parser_state->aml = parser_state->pkg_end;
walk_state->arg_count = 0;
}
break;
case AML_WHILE_OP:
if (walk_state->control_state) {
walk_state->control_state->control.package_end =
parser_state->pkg_end;
}
break;
default:
/* No action for all other opcodes */
break;
}
break;
}
}
/* Check for arguments that need to be processed */
if (walk_state->arg_count) {
/*
* There are arguments (complex ones), push Op and
* prepare for argument
*/
status = acpi_ps_push_scope (parser_state, op,
walk_state->arg_types, walk_state->arg_count);
if (ACPI_FAILURE (status)) {
goto close_this_op;
}
op = NULL;
continue;
}
/*
* All arguments have been processed -- Op is complete,
* prepare for next
*/
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
if (walk_state->op_info->flags & AML_NAMED) {
if (acpi_gbl_depth) {
acpi_gbl_depth--;
}
if (op->common.aml_opcode == AML_REGION_OP) {
/*
* Completed parsing an op_region declaration whose body was
* deferred above; there was not enough info in the first pass
* to parse it, but the body length is now known.
*/
op->named.length = (u32) (parser_state->aml - op->named.data);
}
}
if (walk_state->op_info->flags & AML_CREATE) {
/*
* The create_xXXfield declaration is now complete; the body
* has been parsed, so the previously unknown body_length can
* be computed.
*/
op->named.length = (u32) (parser_state->aml - op->named.data);
}
/* This op complete, notify the dispatcher */
if (walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
}
close_this_op:
/*
* Finished one argument of the containing scope
*/
parser_state->scope->parse_scope.arg_count--;
/* Finished with pre_op */
if (pre_op) {
acpi_ps_free_op (pre_op);
pre_op = NULL;
}
/* Close this Op (will result in parse subtree deletion) */
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
op = NULL;
switch (status) {
case AE_OK:
break;
case AE_CTRL_TRANSFER:
/* We are about to transfer to a called method. */
walk_state->prev_op = op;
walk_state->prev_arg_types = walk_state->arg_types;
return_ACPI_STATUS (status);
case AE_CTRL_END:
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
if (op) {
walk_state->op = op;
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
op = NULL;
}
status = AE_OK;
break;
case AE_CTRL_BREAK:
case AE_CTRL_CONTINUE:
/* Pop off scopes until we find the While */
while (!op || (op->common.aml_opcode != AML_WHILE_OP)) {
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
}
/* Close this iteration of the While loop */
walk_state->op = op;
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
op = NULL;
status = AE_OK;
break;
case AE_CTRL_TERMINATE:
status = AE_OK;
/* Clean up */
do {
if (op) {
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
} while (op);
return_ACPI_STATUS (status);
default: /* All other non-AE_OK status */
do {
if (op) {
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
} while (op);
/*
* TBD: Cleanup parse ops on error
*/
#if 0
if (op == NULL) {
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
}
#endif
walk_state->prev_op = op;
walk_state->prev_arg_types = walk_state->arg_types;
return_ACPI_STATUS (status);
}
/* This scope complete? */
if (acpi_ps_has_completed_scope (parser_state)) {
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Popped scope, Op=%p\n", op));
}
else {
op = NULL;
}
} /* while parser_state->Aml */
/*
* Complete the last Op (if not completed), and clear the scope stack.
* It is easily possible to end an AML "package" with an unbounded number
* of open scopes (such as when several ASL blocks are closed with
* sequential closing braces). We want to terminate each one cleanly.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "AML package complete at Op %p\n", op));
do {
if (op) {
if (walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info = acpi_ps_get_opcode_info (op->common.aml_opcode);
walk_state->opcode = op->common.aml_opcode;
status = walk_state->ascending_callback (walk_state);
status = acpi_ps_next_parse_state (walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
goto close_this_op;
}
if (status == AE_CTRL_TERMINATE) {
status = AE_OK;
/* Clean up */
do {
if (op) {
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op,
&walk_state->arg_types, &walk_state->arg_count);
} while (op);
return_ACPI_STATUS (status);
}
else if (ACPI_FAILURE (status)) {
/* First error is most important */
(void) acpi_ps_complete_this_op (walk_state, op);
return_ACPI_STATUS (status);
}
}
status2 = acpi_ps_complete_this_op (walk_state, op);
if (ACPI_FAILURE (status2)) {
return_ACPI_STATUS (status2);
}
}
acpi_ps_pop_scope (parser_state, &op, &walk_state->arg_types,
&walk_state->arg_count);
} while (op);
return_ACPI_STATUS (status);
}
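The loop above is driven entirely through two walk-state callbacks: descending_callback is invoked when an Op is created (and may create or look up a namespace node for it), ascending_callback once all of the Op's arguments have been parsed. A minimal sketch of a conforming callback pair follows; the example_* names are placeholders and are not part of this change.

static acpi_status
example_descending_callback (
	struct acpi_walk_state *walk_state,
	union acpi_parse_object **out_op)
{
	/* Called on the way down; *out_op is the newly created Op */

	return (AE_OK);
}

static acpi_status
example_ascending_callback (
	struct acpi_walk_state *walk_state)
{
	/* Called on the way up; walk_state->op is the completed Op */

	return (AE_OK);
}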
/*******************************************************************************
*
* FUNCTION: acpi_ps_parse_aml

View file

@ -154,12 +154,14 @@ acpi_ps_alloc_op (
if (flags == ACPI_PARSEOP_GENERIC) {
/* The generic op (default) is by far the most common (16 to 1) */
op = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_PSNODE);
op = acpi_os_acquire_object (acpi_gbl_ps_node_cache);
if (op) {
memset(op, 0, sizeof(struct acpi_parse_obj_common));
}
}
else {
/* Extended parseop */
op = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_PSNODE_EXT);
op = acpi_os_acquire_object (acpi_gbl_ps_node_ext_cache);
if (op) {
memset(op, 0, sizeof(struct acpi_parse_obj_named));
}
}
/* Initialize the Op */
@ -198,41 +200,14 @@ acpi_ps_free_op (
}
if (op->common.flags & ACPI_PARSEOP_GENERIC) {
acpi_ut_release_to_cache (ACPI_MEM_LIST_PSNODE, op);
acpi_os_release_object (acpi_gbl_ps_node_cache, op);
}
else {
acpi_ut_release_to_cache (ACPI_MEM_LIST_PSNODE_EXT, op);
acpi_os_release_object (acpi_gbl_ps_node_ext_cache, op);
}
}
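Parse ops are now acquired from and released to the OSL object caches (acpi_gbl_ps_node_cache for generic ops, acpi_gbl_ps_node_ext_cache for extended ones). A minimal usage sketch, assuming AML_INT_NAMEPATH_OP as a representative generic opcode:

static acpi_status
example_alloc_and_free_op (
	void)
{
	union acpi_parse_object *op;

	/* Acquire a generic op; it comes from acpi_gbl_ps_node_cache */

	op = acpi_ps_alloc_op (AML_INT_NAMEPATH_OP);
	if (!op) {
		return (AE_NO_MEMORY);
	}

	/* ... use the op (append args, link it into the parse tree) ... */

	/* Release it back to the same cache (freed if the cache is full) */

	acpi_ps_free_op (op);
	return (AE_OK);
}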
#ifdef ACPI_ENABLE_OBJECT_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_ps_delete_parse_cache
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Free all objects that are on the parse cache list.
*
******************************************************************************/
void
acpi_ps_delete_parse_cache (
void)
{
ACPI_FUNCTION_TRACE ("ps_delete_parse_cache");
acpi_ut_delete_generic_cache (ACPI_MEM_LIST_PSNODE);
acpi_ut_delete_generic_cache (ACPI_MEM_LIST_PSNODE_EXT);
return_VOID;
}
#endif
/*******************************************************************************
*
* FUNCTION: Utility functions

View file

@ -97,7 +97,9 @@ acpi_tb_get_table_count (
ACPI_FUNCTION_ENTRY ();
if (RSDP->revision < 2) {
/* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
pointer_size = sizeof (u32);
}
else {
@ -158,7 +160,9 @@ acpi_tb_convert_to_xsdt (
/* Copy the table pointers */
for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
if (acpi_gbl_RSDP->revision < 2) {
/* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
ACPI_STORE_ADDRESS (new_table->table_offset_entry[i],
(ACPI_CAST_PTR (struct rsdt_descriptor_rev1,
table_info->pointer))->table_offset_entry[i]);

View file

@ -159,8 +159,8 @@ acpi_tb_verify_rsdp (
*
* RETURN: None, Address
*
* DESCRIPTION: Extract the address of the RSDT or XSDT, depending on the
* version of the RSDP
* DESCRIPTION: Extract the address of either the RSDT or XSDT, depending on the
* version of the RSDP and whether the XSDT pointer is valid
*
******************************************************************************/
@ -174,16 +174,19 @@ acpi_tb_get_rsdt_address (
out_address->pointer_type = acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING;
/*
* For RSDP revision 0 or 1, we use the RSDT.
* For RSDP revision 2 (and above), we use the XSDT
*/
if (acpi_gbl_RSDP->revision < 2) {
out_address->pointer.value = acpi_gbl_RSDP->rsdt_physical_address;
}
else {
/* Use XSDT if it is present */
if ((acpi_gbl_RSDP->revision >= 2) &&
acpi_gbl_RSDP->xsdt_physical_address) {
out_address->pointer.value =
acpi_gbl_RSDP->xsdt_physical_address;
acpi_gbl_root_table_type = ACPI_TABLE_TYPE_XSDT;
}
else {
/* No XSDT, use the RSDT */
out_address->pointer.value = acpi_gbl_RSDP->rsdt_physical_address;
acpi_gbl_root_table_type = ACPI_TABLE_TYPE_RSDT;
}
}
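The selection above reduces to a single predicate: use the XSDT only when the RSDP is revision 2 or later and also supplies a non-null XSDT address; otherwise fall back to the RSDT. A standalone sketch of that predicate (hypothetical helper; the struct name rsdp_descriptor is assumed from the acpi_gbl_RSDP usage above):

static u8
example_use_xsdt (
	struct rsdp_descriptor *rsdp)
{
	/* The XSDT is optional even for revision 2+ RSDPs */

	return ((u8) ((rsdp->revision >= 2) &&
		(rsdp->xsdt_physical_address != 0)));
}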
@ -211,10 +214,9 @@ acpi_tb_validate_rsdt (
/*
* For RSDP revision 0 or 1, we use the RSDT.
* For RSDP revision 2 and above, we use the XSDT
* Search for appropriate signature, RSDT or XSDT
*/
if (acpi_gbl_RSDP->revision < 2) {
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
no_match = ACPI_STRNCMP ((char *) table_ptr, RSDT_SIG,
sizeof (RSDT_SIG) -1);
}
@ -236,11 +238,11 @@ acpi_tb_validate_rsdt (
acpi_gbl_RSDP->rsdt_physical_address,
(void *) (acpi_native_uint) acpi_gbl_RSDP->rsdt_physical_address));
if (acpi_gbl_RSDP->revision < 2) {
ACPI_REPORT_ERROR (("Looking for RSDT (RSDP->Rev < 2)\n"))
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
ACPI_REPORT_ERROR (("Looking for RSDT\n"))
}
else {
ACPI_REPORT_ERROR (("Looking for XSDT (RSDP->Rev >= 2)\n"))
ACPI_REPORT_ERROR (("Looking for XSDT\n"))
}
ACPI_DUMP_BUFFER ((char *) table_ptr, 48);

View file

@ -287,9 +287,11 @@ acpi_get_firmware_table (
* requested table
*/
for (i = 0, j = 0; i < table_count; i++) {
/* Get the next table pointer, handle RSDT vs. XSDT */
if (acpi_gbl_RSDP->revision < 2) {
/*
* Get the next table pointer, handle RSDT vs. XSDT
* RSDT pointers are 32 bits, XSDT pointers are 64 bits
*/
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
address.pointer.value = (ACPI_CAST_PTR (
RSDT_DESCRIPTOR, rsdt_info->pointer))->table_offset_entry[i];
}

View file

@ -3,6 +3,6 @@
#
obj-y := utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
utcopy.o utdelete.o utglobal.o utmath.o utobject.o
utcopy.o utdelete.o utglobal.o utmath.o utobject.o utstate.o utmutex.o utcache.o
EXTRA_CFLAGS += $(ACPI_CFLAGS)

View file

@ -1,6 +1,6 @@
/******************************************************************************
*
* Module Name: utalloc - local cache and memory allocation routines
* Module Name: utalloc - local memory allocation routines
*
*****************************************************************************/
@ -52,12 +52,10 @@
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
static struct acpi_debug_mem_block *
acpi_ut_find_allocation (
u32 list_id,
void *allocation);
static acpi_status
acpi_ut_track_allocation (
u32 list_id,
struct acpi_debug_mem_block *address,
acpi_size size,
u8 alloc_type,
@ -67,206 +65,118 @@ acpi_ut_track_allocation (
static acpi_status
acpi_ut_remove_allocation (
u32 list_id,
struct acpi_debug_mem_block *address,
u32 component,
char *module,
u32 line);
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
/*******************************************************************************
*
* FUNCTION: acpi_ut_release_to_cache
*
* PARAMETERS: list_id - Memory list/cache ID
* Object - The object to be released
*
* RETURN: None
*
* DESCRIPTION: Release an object to the specified cache. If cache is full,
* the object is deleted.
*
******************************************************************************/
void
acpi_ut_release_to_cache (
u32 list_id,
void *object)
{
struct acpi_memory_list *cache_info;
ACPI_FUNCTION_ENTRY ();
cache_info = &acpi_gbl_memory_lists[list_id];
#ifdef ACPI_ENABLE_OBJECT_CACHE
/* If cache is full, just free this object */
if (cache_info->cache_depth >= cache_info->max_cache_depth) {
ACPI_MEM_FREE (object);
ACPI_MEM_TRACKING (cache_info->total_freed++);
}
/* Otherwise put this object back into the cache */
else {
if (ACPI_FAILURE (acpi_ut_acquire_mutex (ACPI_MTX_CACHES))) {
return;
}
/* Mark the object as cached */
ACPI_MEMSET (object, 0xCA, cache_info->object_size);
ACPI_SET_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_CACHED);
/* Put the object at the head of the cache list */
* (ACPI_CAST_INDIRECT_PTR (char,
&(((char *) object)[cache_info->link_offset]))) = cache_info->list_head;
cache_info->list_head = object;
cache_info->cache_depth++;
(void) acpi_ut_release_mutex (ACPI_MTX_CACHES);
}
#else
/* Object cache is disabled; just free the object */
ACPI_MEM_FREE (object);
ACPI_MEM_TRACKING (cache_info->total_freed++);
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
static acpi_status
acpi_ut_create_list (
char *list_name,
u16 object_size,
acpi_handle *return_cache);
#endif
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_acquire_from_cache
* FUNCTION: acpi_ut_create_caches
*
* PARAMETERS: list_id - Memory list ID
* PARAMETERS: None
*
* RETURN: A requested object. NULL if the object could not be
* allocated.
* RETURN: Status
*
* DESCRIPTION: Get an object from the specified cache. If cache is empty,
* the object is allocated.
* DESCRIPTION: Create all local caches
*
******************************************************************************/
void *
acpi_ut_acquire_from_cache (
u32 list_id)
acpi_status
acpi_ut_create_caches (
void)
{
struct acpi_memory_list *cache_info;
void *object;
acpi_status status;
ACPI_FUNCTION_NAME ("ut_acquire_from_cache");
cache_info = &acpi_gbl_memory_lists[list_id];
#ifdef ACPI_ENABLE_OBJECT_CACHE
if (ACPI_FAILURE (acpi_ut_acquire_mutex (ACPI_MTX_CACHES))) {
return (NULL);
}
ACPI_MEM_TRACKING (cache_info->cache_requests++);
/* Check the cache first */
if (cache_info->list_head) {
/* There is an object available, use it */
object = cache_info->list_head;
cache_info->list_head = *(ACPI_CAST_INDIRECT_PTR (char,
&(((char *) object)[cache_info->link_offset])));
ACPI_MEM_TRACKING (cache_info->cache_hits++);
cache_info->cache_depth--;
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Object %p from %s\n",
object, acpi_gbl_memory_lists[list_id].list_name));
#endif
if (ACPI_FAILURE (acpi_ut_release_mutex (ACPI_MTX_CACHES))) {
return (NULL);
}
/* Memory allocation lists */
/* Clear (zero) the previously used Object */
ACPI_MEMSET (object, 0, cache_info->object_size);
status = acpi_ut_create_list ("Acpi-Global", 0,
&acpi_gbl_global_list);
if (ACPI_FAILURE (status)) {
return (status);
}
else {
/* The cache is empty, create a new object */
/* Avoid deadlock with ACPI_MEM_CALLOCATE */
if (ACPI_FAILURE (acpi_ut_release_mutex (ACPI_MTX_CACHES))) {
return (NULL);
}
object = ACPI_MEM_CALLOCATE (cache_info->object_size);
ACPI_MEM_TRACKING (cache_info->total_allocated++);
status = acpi_ut_create_list ("Acpi-Namespace", sizeof (struct acpi_namespace_node),
&acpi_gbl_ns_node_list);
if (ACPI_FAILURE (status)) {
return (status);
}
#else
/* Object cache is disabled; just allocate the object */
object = ACPI_MEM_CALLOCATE (cache_info->object_size);
ACPI_MEM_TRACKING (cache_info->total_allocated++);
#endif
return (object);
/* Object Caches, for frequently used objects */
status = acpi_os_create_cache ("acpi_state", sizeof (union acpi_generic_state),
ACPI_MAX_STATE_CACHE_DEPTH, &acpi_gbl_state_cache);
if (ACPI_FAILURE (status)) {
return (status);
}
status = acpi_os_create_cache ("acpi_parse", sizeof (struct acpi_parse_obj_common),
ACPI_MAX_PARSE_CACHE_DEPTH, &acpi_gbl_ps_node_cache);
if (ACPI_FAILURE (status)) {
return (status);
}
status = acpi_os_create_cache ("acpi_parse_ext", sizeof (struct acpi_parse_obj_named),
ACPI_MAX_EXTPARSE_CACHE_DEPTH, &acpi_gbl_ps_node_ext_cache);
if (ACPI_FAILURE (status)) {
return (status);
}
status = acpi_os_create_cache ("acpi_operand", sizeof (union acpi_operand_object),
ACPI_MAX_OBJECT_CACHE_DEPTH, &acpi_gbl_operand_cache);
if (ACPI_FAILURE (status)) {
return (status);
}
return (AE_OK);
}
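These caches are meant to live for the lifetime of the subsystem: acpi_ut_init_globals creates them and acpi_ut_subsystem_shutdown deletes them (both hunks appear further below). A minimal sketch of the pairing, shown only for orientation:

static acpi_status
example_cache_lifetime (
	void)
{
	acpi_status status;

	status = acpi_ut_create_caches ();
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	/* ... parse ops, operand objects, and state objects are now served from the caches ... */

	return (acpi_ut_delete_caches ());
}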
#ifdef ACPI_ENABLE_OBJECT_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_generic_cache
* FUNCTION: acpi_ut_delete_caches
*
* PARAMETERS: list_id - Memory list ID
* PARAMETERS: None
*
* RETURN: None
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache.
* DESCRIPTION: Purge and delete all local caches
*
******************************************************************************/
void
acpi_ut_delete_generic_cache (
u32 list_id)
acpi_status
acpi_ut_delete_caches (
void)
{
struct acpi_memory_list *cache_info;
char *next;
(void) acpi_os_delete_cache (acpi_gbl_state_cache);
acpi_gbl_state_cache = NULL;
ACPI_FUNCTION_ENTRY ();
(void) acpi_os_delete_cache (acpi_gbl_operand_cache);
acpi_gbl_operand_cache = NULL;
(void) acpi_os_delete_cache (acpi_gbl_ps_node_cache);
acpi_gbl_ps_node_cache = NULL;
cache_info = &acpi_gbl_memory_lists[list_id];
while (cache_info->list_head) {
/* Delete one cached state object */
(void) acpi_os_delete_cache (acpi_gbl_ps_node_ext_cache);
acpi_gbl_ps_node_ext_cache = NULL;
next = *(ACPI_CAST_INDIRECT_PTR (char,
&(((char *) cache_info->list_head)[cache_info->link_offset])));
ACPI_MEM_FREE (cache_info->list_head);
cache_info->list_head = next;
cache_info->cache_depth--;
}
return (AE_OK);
}
#endif
/*******************************************************************************
*
@ -500,6 +410,43 @@ acpi_ut_callocate (
* occurs in the body of acpi_ut_free.
*/
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_list
*
* PARAMETERS: list_name - Ascii name for the list
* object_size - Size of each tracked object
* return_cache - Where the new list object is returned
*
* RETURN: Status
*
* DESCRIPTION: Create a local memory list for tracking purposes
*
******************************************************************************/
static acpi_status
acpi_ut_create_list (
char *list_name,
u16 object_size,
acpi_handle *return_cache)
{
struct acpi_memory_list *cache;
cache = acpi_os_allocate (sizeof (struct acpi_memory_list));
if (!cache) {
return (AE_NO_MEMORY);
}
ACPI_MEMSET (cache, 0, sizeof (struct acpi_memory_list));
cache->list_name = list_name;
cache->object_size = object_size;
*return_cache = cache;
return (AE_OK);
}
/*******************************************************************************
*
@ -533,15 +480,15 @@ acpi_ut_allocate_and_track (
return (NULL);
}
status = acpi_ut_track_allocation (ACPI_MEM_LIST_GLOBAL, allocation, size,
status = acpi_ut_track_allocation (allocation, size,
ACPI_MEM_MALLOC, component, module, line);
if (ACPI_FAILURE (status)) {
acpi_os_free (allocation);
return (NULL);
}
acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_allocated++;
acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size += (u32) size;
acpi_gbl_global_list->total_allocated++;
acpi_gbl_global_list->current_total_size += (u32) size;
return ((void *) &allocation->user_space);
}
@ -583,15 +530,15 @@ acpi_ut_callocate_and_track (
return (NULL);
}
status = acpi_ut_track_allocation (ACPI_MEM_LIST_GLOBAL, allocation, size,
status = acpi_ut_track_allocation (allocation, size,
ACPI_MEM_CALLOC, component, module, line);
if (ACPI_FAILURE (status)) {
acpi_os_free (allocation);
return (NULL);
}
acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_allocated++;
acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size += (u32) size;
acpi_gbl_global_list->total_allocated++;
acpi_gbl_global_list->current_total_size += (u32) size;
return ((void *) &allocation->user_space);
}
@ -636,10 +583,10 @@ acpi_ut_free_and_track (
debug_block = ACPI_CAST_PTR (struct acpi_debug_mem_block,
(((char *) allocation) - sizeof (struct acpi_debug_mem_header)));
acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].total_freed++;
acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].current_total_size -= debug_block->size;
acpi_gbl_global_list->total_freed++;
acpi_gbl_global_list->current_total_size -= debug_block->size;
status = acpi_ut_remove_allocation (ACPI_MEM_LIST_GLOBAL, debug_block,
status = acpi_ut_remove_allocation (debug_block,
component, module, line);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not free memory, %s\n",
@ -658,8 +605,7 @@ acpi_ut_free_and_track (
*
* FUNCTION: acpi_ut_find_allocation
*
* PARAMETERS: list_id - Memory list to search
* Allocation - Address of allocated memory
* PARAMETERS: Allocation - Address of allocated memory
*
* RETURN: A list element if found; NULL otherwise.
*
@ -669,7 +615,6 @@ acpi_ut_free_and_track (
static struct acpi_debug_mem_block *
acpi_ut_find_allocation (
u32 list_id,
void *allocation)
{
struct acpi_debug_mem_block *element;
@ -678,11 +623,7 @@ acpi_ut_find_allocation (
ACPI_FUNCTION_ENTRY ();
if (list_id > ACPI_MEM_LIST_MAX) {
return (NULL);
}
element = acpi_gbl_memory_lists[list_id].list_head;
element = acpi_gbl_global_list->list_head;
/* Search for the address. */
@ -702,8 +643,7 @@ acpi_ut_find_allocation (
*
* FUNCTION: acpi_ut_track_allocation
*
* PARAMETERS: list_id - Memory list to search
* Allocation - Address of allocated memory
* PARAMETERS: Allocation - Address of allocated memory
* Size - Size of the allocation
* alloc_type - MEM_MALLOC or MEM_CALLOC
* Component - Component type of caller
@ -718,7 +658,6 @@ acpi_ut_find_allocation (
static acpi_status
acpi_ut_track_allocation (
u32 list_id,
struct acpi_debug_mem_block *allocation,
acpi_size size,
u8 alloc_type,
@ -734,11 +673,7 @@ acpi_ut_track_allocation (
ACPI_FUNCTION_TRACE_PTR ("ut_track_allocation", allocation);
if (list_id > ACPI_MEM_LIST_MAX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
mem_list = &acpi_gbl_memory_lists[list_id];
mem_list = acpi_gbl_global_list;
status = acpi_ut_acquire_mutex (ACPI_MTX_MEMORY);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
@ -748,8 +683,7 @@ acpi_ut_track_allocation (
* Search list for this address to make sure it is not already on the list.
* This will catch several kinds of problems.
*/
element = acpi_ut_find_allocation (list_id, allocation);
element = acpi_ut_find_allocation (allocation);
if (element) {
ACPI_REPORT_ERROR ((
"ut_track_allocation: Allocation already present in list! (%p)\n",
@ -793,8 +727,7 @@ acpi_ut_track_allocation (
*
* FUNCTION: acpi_ut_remove_allocation
*
* PARAMETERS: list_id - Memory list to search
* Allocation - Address of allocated memory
* PARAMETERS: Allocation - Address of allocated memory
* Component - Component type of caller
* Module - Source file name of caller
* Line - Line number of caller
@ -807,7 +740,6 @@ acpi_ut_track_allocation (
static acpi_status
acpi_ut_remove_allocation (
u32 list_id,
struct acpi_debug_mem_block *allocation,
u32 component,
char *module,
@ -820,11 +752,7 @@ acpi_ut_remove_allocation (
ACPI_FUNCTION_TRACE ("ut_remove_allocation");
if (list_id > ACPI_MEM_LIST_MAX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
mem_list = &acpi_gbl_memory_lists[list_id];
mem_list = acpi_gbl_global_list;
if (NULL == mem_list->list_head) {
/* No allocations! */
@ -959,7 +887,7 @@ acpi_ut_dump_allocations (
return;
}
element = acpi_gbl_memory_lists[0].list_head;
element = acpi_gbl_global_list->list_head;
while (element) {
if ((element->component & component) &&
((module == NULL) || (0 == ACPI_STRCMP (module, element->module)))) {

View file

@ -0,0 +1,322 @@
/******************************************************************************
*
* Module Name: utcache - local cache allocation routines
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME ("utcache")
#ifdef ACPI_USE_LOCAL_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_os_create_cache
*
* PARAMETERS: cache_name - Ascii name for the cache
* object_size - Size of each cached object
* max_depth - Maximum depth of the cache (in objects)
* return_cache - Where the new cache object is returned
*
* RETURN: Status
*
* DESCRIPTION: Create a cache object
*
******************************************************************************/
acpi_status
acpi_os_create_cache (
char *cache_name,
u16 object_size,
u16 max_depth,
struct acpi_memory_list **return_cache)
{
struct acpi_memory_list *cache;
if (!cache_name || !return_cache || (object_size < 16)) {
return (AE_BAD_PARAMETER);
}
/* Create the cache object */
cache = acpi_os_allocate (sizeof (struct acpi_memory_list));
if (!cache) {
return (AE_NO_MEMORY);
}
/* Populate the cache object and return it */
ACPI_MEMSET (cache, 0, sizeof (struct acpi_memory_list));
cache->link_offset = 8;
cache->list_name = cache_name;
cache->object_size = object_size;
cache->max_depth = max_depth;
*return_cache = cache;
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_purge_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache.
*
******************************************************************************/
acpi_status
acpi_os_purge_cache (
struct acpi_memory_list *cache)
{
char *next;
ACPI_FUNCTION_ENTRY ();
if (!cache) {
return (AE_BAD_PARAMETER);
}
/* Walk the list of objects in this cache */
while (cache->list_head) {
/* Delete and unlink one cached state object */
next = *(ACPI_CAST_INDIRECT_PTR (char,
&(((char *) cache->list_head)[cache->link_offset])));
ACPI_MEM_FREE (cache->list_head);
cache->list_head = next;
cache->current_depth--;
}
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_delete_cache
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: Status
*
* DESCRIPTION: Free all objects within the requested cache and delete the
* cache object.
*
******************************************************************************/
acpi_status
acpi_os_delete_cache (
struct acpi_memory_list *cache)
{
acpi_status status;
/* Purge all objects in the cache */
status = acpi_os_purge_cache (cache);
if (ACPI_FAILURE (status)) {
return (status);
}
/* Now we can delete the cache object */
acpi_os_free (cache);
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_release_object
*
* PARAMETERS: Cache - Handle to cache object
* Object - The object to be released
*
* RETURN: None
*
* DESCRIPTION: Release an object to the specified cache. If cache is full,
* the object is deleted.
*
******************************************************************************/
acpi_status
acpi_os_release_object (
struct acpi_memory_list *cache,
void *object)
{
acpi_status status;
ACPI_FUNCTION_ENTRY ();
if (!cache || !object) {
return (AE_BAD_PARAMETER);
}
/* If cache is full, just free this object */
if (cache->current_depth >= cache->max_depth) {
ACPI_MEM_FREE (object);
ACPI_MEM_TRACKING (cache->total_freed++);
}
/* Otherwise put this object back into the cache */
else {
status = acpi_ut_acquire_mutex (ACPI_MTX_CACHES);
if (ACPI_FAILURE (status)) {
return (status);
}
/* Mark the object as cached */
ACPI_MEMSET (object, 0xCA, cache->object_size);
ACPI_SET_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_CACHED);
/* Put the object at the head of the cache list */
* (ACPI_CAST_INDIRECT_PTR (char,
&(((char *) object)[cache->link_offset]))) = cache->list_head;
cache->list_head = object;
cache->current_depth++;
(void) acpi_ut_release_mutex (ACPI_MTX_CACHES);
}
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_os_acquire_object
*
* PARAMETERS: Cache - Handle to cache object
*
* RETURN: The acquired object. NULL on error
*
* DESCRIPTION: Get an object from the specified cache. If cache is empty,
* the object is allocated.
*
******************************************************************************/
void *
acpi_os_acquire_object (
struct acpi_memory_list *cache)
{
acpi_status status;
void *object;
ACPI_FUNCTION_NAME ("os_acquire_object");
if (!cache) {
return (NULL);
}
status = acpi_ut_acquire_mutex (ACPI_MTX_CACHES);
if (ACPI_FAILURE (status)) {
return (NULL);
}
ACPI_MEM_TRACKING (cache->requests++);
/* Check the cache first */
if (cache->list_head) {
/* There is an object available, use it */
object = cache->list_head;
cache->list_head = *(ACPI_CAST_INDIRECT_PTR (char,
&(((char *) object)[cache->link_offset])));
cache->current_depth--;
ACPI_MEM_TRACKING (cache->hits++);
ACPI_MEM_TRACKING (ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"Object %p from %s\n", object, cache->list_name)));
status = acpi_ut_release_mutex (ACPI_MTX_CACHES);
if (ACPI_FAILURE (status)) {
return (NULL);
}
/* Clear (zero) the previously used Object */
ACPI_MEMSET (object, 0, cache->object_size);
}
else {
/* The cache is empty, create a new object */
ACPI_MEM_TRACKING (cache->total_allocated++);
/* Avoid deadlock with ACPI_MEM_CALLOCATE */
status = acpi_ut_release_mutex (ACPI_MTX_CACHES);
if (ACPI_FAILURE (status)) {
return (NULL);
}
object = ACPI_MEM_CALLOCATE (cache->object_size);
if (!object) {
return (NULL);
}
}
return (object);
}
#endif /* ACPI_USE_LOCAL_CACHE */
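Taken together, the five routines above implement the complete cache lifecycle behind the new OSL interfaces. A hedged end-to-end sketch using the local-cache types from this file (the cache name, object type, and depth of 16 are placeholders):

static acpi_status
example_cache_usage (
	void)
{
	struct acpi_memory_list *cache;
	void *object;
	acpi_status status;

	/* Create a cache of fixed-size objects (object_size must be >= 16) */

	status = acpi_os_create_cache ("example_cache",
		sizeof (union acpi_generic_state), 16, &cache);
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	/* Acquire a zeroed object, either reused from the cache or freshly allocated */

	object = acpi_os_acquire_object (cache);
	if (!object) {
		(void) acpi_os_delete_cache (cache);
		return (AE_NO_MEMORY);
	}

	/* Return it; the object is kept for reuse unless the cache is already full */

	(void) acpi_os_release_object (cache, object);

	/* Purge and delete the cache when it is no longer needed */

	return (acpi_os_delete_cache (cache));
}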

View file

@ -549,7 +549,7 @@ acpi_ut_dump_buffer (
/* Dump fill spaces */
acpi_os_printf ("%*s", ((display * 2) + 1), " ");
j += display;
j += (acpi_native_uint) display;
continue;
}
@ -584,7 +584,7 @@ acpi_ut_dump_buffer (
break;
}
j += display;
j += (acpi_native_uint) display;
}
/*

View file

@ -820,42 +820,20 @@ void
acpi_ut_init_globals (
void)
{
acpi_status status;
u32 i;
ACPI_FUNCTION_TRACE ("ut_init_globals");
/* Memory allocation and cache lists */
/* Create all memory caches */
ACPI_MEMSET (acpi_gbl_memory_lists, 0, sizeof (struct acpi_memory_list) * ACPI_NUM_MEM_LISTS);
acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_generic_state *) NULL)->common.next), NULL);
acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_parse_object *) NULL)->common.next), NULL);
acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_parse_object *) NULL)->common.next), NULL);
acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].link_offset = (u16) ACPI_PTR_DIFF (&(((union acpi_operand_object *) NULL)->cache.next), NULL);
acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].link_offset = (u16) ACPI_PTR_DIFF (&(((struct acpi_walk_state *) NULL)->next), NULL);
acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].object_size = sizeof (struct acpi_namespace_node);
acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].object_size = sizeof (union acpi_generic_state);
acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].object_size = sizeof (struct acpi_parse_obj_common);
acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].object_size = sizeof (struct acpi_parse_obj_named);
acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].object_size = sizeof (union acpi_operand_object);
acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].object_size = sizeof (struct acpi_walk_state);
acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].max_cache_depth = ACPI_MAX_STATE_CACHE_DEPTH;
acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].max_cache_depth = ACPI_MAX_PARSE_CACHE_DEPTH;
acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].max_cache_depth = ACPI_MAX_EXTPARSE_CACHE_DEPTH;
acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].max_cache_depth = ACPI_MAX_OBJECT_CACHE_DEPTH;
acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].max_cache_depth = ACPI_MAX_WALK_CACHE_DEPTH;
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_GLOBAL].list_name = "Global Memory Allocation");
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_NSNODE].list_name = "Namespace Nodes");
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_STATE].list_name = "State Object Cache");
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE].list_name = "Parse Node Cache");
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_PSNODE_EXT].list_name = "Extended Parse Node Cache");
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_OPERAND].list_name = "Operand Object Cache");
ACPI_MEM_TRACKING (acpi_gbl_memory_lists[ACPI_MEM_LIST_WALK].list_name = "Tree Walk Node Cache");
status = acpi_ut_create_caches ();
if (ACPI_FAILURE (status)) {
return;
}
/* ACPI table structure */

View file

@ -264,7 +264,7 @@ acpi_ut_subsystem_shutdown (
/* Purge the local caches */
(void) acpi_purge_cached_objects ();
(void) acpi_ut_delete_caches ();
/* Debug only - display leftover memory allocation, if any */

View file

@ -49,16 +49,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME ("utmisc")
/* Local prototypes */
static acpi_status
acpi_ut_create_mutex (
acpi_mutex_handle mutex_id);
static acpi_status
acpi_ut_delete_mutex (
acpi_mutex_handle mutex_id);
/*******************************************************************************
*
@ -84,6 +74,10 @@ acpi_ut_strupr (
ACPI_FUNCTION_ENTRY ();
if (!src_string) {
return (NULL);
}
/* Walk entire string, uppercasing the letters */
for (string = src_string; *string; string++) {
@ -541,326 +535,6 @@ acpi_ut_strtoul64 (
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_mutex_initialize
*
* PARAMETERS: None.
*
* RETURN: Status
*
* DESCRIPTION: Create the system mutex objects.
*
******************************************************************************/
acpi_status
acpi_ut_mutex_initialize (
void)
{
u32 i;
acpi_status status;
ACPI_FUNCTION_TRACE ("ut_mutex_initialize");
/*
* Create each of the predefined mutex objects
*/
for (i = 0; i < NUM_MUTEX; i++) {
status = acpi_ut_create_mutex (i);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_mutex_terminate
*
* PARAMETERS: None.
*
* RETURN: None.
*
* DESCRIPTION: Delete all of the system mutex objects.
*
******************************************************************************/
void
acpi_ut_mutex_terminate (
void)
{
u32 i;
ACPI_FUNCTION_TRACE ("ut_mutex_terminate");
/*
* Delete each predefined mutex object
*/
for (i = 0; i < NUM_MUTEX; i++) {
(void) acpi_ut_delete_mutex (i);
}
acpi_os_delete_lock (acpi_gbl_gpe_lock);
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be created
*
* RETURN: Status
*
* DESCRIPTION: Create a mutex object.
*
******************************************************************************/
static acpi_status
acpi_ut_create_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_U32 ("ut_create_mutex", mutex_id);
if (mutex_id > MAX_MUTEX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
if (!acpi_gbl_mutex_info[mutex_id].mutex) {
status = acpi_os_create_semaphore (1, 1,
&acpi_gbl_mutex_info[mutex_id].mutex);
acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED;
acpi_gbl_mutex_info[mutex_id].use_count = 0;
}
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be deleted
*
* RETURN: Status
*
* DESCRIPTION: Delete a mutex object.
*
******************************************************************************/
static acpi_status
acpi_ut_delete_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status;
ACPI_FUNCTION_TRACE_U32 ("ut_delete_mutex", mutex_id);
if (mutex_id > MAX_MUTEX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
status = acpi_os_delete_semaphore (acpi_gbl_mutex_info[mutex_id].mutex);
acpi_gbl_mutex_info[mutex_id].mutex = NULL;
acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED;
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_acquire_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be acquired
*
* RETURN: Status
*
* DESCRIPTION: Acquire a mutex object.
*
******************************************************************************/
acpi_status
acpi_ut_acquire_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status;
u32 this_thread_id;
ACPI_FUNCTION_NAME ("ut_acquire_mutex");
if (mutex_id > MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
this_thread_id = acpi_os_get_thread_id ();
#ifdef ACPI_MUTEX_DEBUG
{
u32 i;
/*
* Mutex debug code, for internal debugging only.
*
* Deadlock prevention. Check if this thread owns any mutexes of value
* greater than or equal to this one. If so, the thread has violated
* the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) {
if (i == mutex_id) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Mutex [%s] already acquired by this thread [%X]\n",
acpi_ut_get_mutex_name (mutex_id), this_thread_id));
return (AE_ALREADY_ACQUIRED);
}
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Invalid acquire order: Thread %X owns [%s], wants [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (i),
acpi_ut_get_mutex_name (mutex_id)));
return (AE_ACQUIRE_DEADLOCK);
}
}
}
#endif
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX,
"Thread %X attempting to acquire Mutex [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id)));
status = acpi_os_wait_semaphore (acpi_gbl_mutex_info[mutex_id].mutex,
1, ACPI_WAIT_FOREVER);
if (ACPI_SUCCESS (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X acquired Mutex [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++;
acpi_gbl_mutex_info[mutex_id].owner_id = this_thread_id;
}
else {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Thread %X could not acquire Mutex [%s] %s\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id),
acpi_format_exception (status)));
}
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_release_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be released
*
* RETURN: Status
*
* DESCRIPTION: Release a mutex object.
*
******************************************************************************/
acpi_status
acpi_ut_release_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status;
u32 this_thread_id;
ACPI_FUNCTION_NAME ("ut_release_mutex");
this_thread_id = acpi_os_get_thread_id ();
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX,
"Thread %X releasing Mutex [%s]\n", this_thread_id,
acpi_ut_get_mutex_name (mutex_id)));
if (mutex_id > MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
/*
* Mutex must be acquired in order to release it!
*/
if (acpi_gbl_mutex_info[mutex_id].owner_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Mutex [%s] is not acquired, cannot release\n",
acpi_ut_get_mutex_name (mutex_id)));
return (AE_NOT_ACQUIRED);
}
#ifdef ACPI_MUTEX_DEBUG
{
u32 i;
/*
* Mutex debug code, for internal debugging only.
*
* Deadlock prevention. Check if this thread owns any mutexes of value
* greater than this one. If so, the thread has violated the mutex
* ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) {
if (i == mutex_id) {
continue;
}
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Invalid release order: owns [%s], releasing [%s]\n",
acpi_ut_get_mutex_name (i), acpi_ut_get_mutex_name (mutex_id)));
return (AE_RELEASE_DEADLOCK);
}
}
}
#endif
/* Mark unlocked FIRST */
acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED;
status = acpi_os_signal_semaphore (acpi_gbl_mutex_info[mutex_id].mutex, 1);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Thread %X could not release Mutex [%s] %s\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id),
acpi_format_exception (status)));
}
else {
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X released Mutex [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id)));
}
return (status);
}
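The debug checks above enforce a simple rule: within one thread, mutexes may only be acquired in increasing mutex_id order and should be released in the reverse order. A minimal compliant sketch (illustrative only; it assumes ACPI_MTX_MEMORY has a higher numeric ID than ACPI_MTX_CACHES):

static acpi_status
example_ordered_locking (
	void)
{
	acpi_status status;

	status = acpi_ut_acquire_mutex (ACPI_MTX_CACHES);
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	/* Nested acquisition is legal only for a higher-valued mutex_id */

	status = acpi_ut_acquire_mutex (ACPI_MTX_MEMORY);
	if (ACPI_FAILURE (status)) {
		(void) acpi_ut_release_mutex (ACPI_MTX_CACHES);
		return (status);
	}

	/* ... critical section ... */

	(void) acpi_ut_release_mutex (ACPI_MTX_MEMORY);
	(void) acpi_ut_release_mutex (ACPI_MTX_CACHES);
	return (AE_OK);
}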
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_update_state_and_push
@ -903,361 +577,6 @@ acpi_ut_create_update_state_and_push (
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_pkg_state_and_push
*
* PARAMETERS: Object - Object to be added to the new state
* Action - Increment/Decrement
* state_list - List the state will be added to
*
* RETURN: Status
*
* DESCRIPTION: Create a new state and push it
*
******************************************************************************/
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_ut_create_pkg_state_and_push (
void *internal_object,
void *external_object,
u16 index,
union acpi_generic_state **state_list)
{
union acpi_generic_state *state;
ACPI_FUNCTION_ENTRY ();
state = acpi_ut_create_pkg_state (internal_object, external_object, index);
if (!state) {
return (AE_NO_MEMORY);
}
acpi_ut_push_generic_state (state_list, state);
return (AE_OK);
}
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_ut_push_generic_state
*
* PARAMETERS: list_head - Head of the state stack
* State - State object to push
*
* RETURN: None
*
* DESCRIPTION: Push a state object onto a state stack
*
******************************************************************************/
void
acpi_ut_push_generic_state (
union acpi_generic_state **list_head,
union acpi_generic_state *state)
{
ACPI_FUNCTION_TRACE ("ut_push_generic_state");
/* Push the state object onto the front of the list (stack) */
state->common.next = *list_head;
*list_head = state;
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_pop_generic_state
*
* PARAMETERS: list_head - Head of the state stack
*
* RETURN: The popped state object
*
* DESCRIPTION: Pop a state object from a state stack
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_pop_generic_state (
union acpi_generic_state **list_head)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE ("ut_pop_generic_state");
/* Remove the state object at the head of the list (stack) */
state = *list_head;
if (state) {
/* Update the list head */
*list_head = state->common.next;
}
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_generic_state
*
* PARAMETERS: None
*
* RETURN: The new state object. NULL on failure.
*
* DESCRIPTION: Create a generic state object. Attempt to obtain one from
* the global state cache; If none available, create a new one.
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_generic_state (
void)
{
union acpi_generic_state *state;
ACPI_FUNCTION_ENTRY ();
state = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_STATE);
/* Initialize */
if (state) {
state->common.data_type = ACPI_DESC_TYPE_STATE;
}
return (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_thread_state
*
* PARAMETERS: None
*
* RETURN: New Thread State. NULL on failure
*
* DESCRIPTION: Create a "Thread State" - a flavor of the generic state used
* to track per-thread info during method execution
*
******************************************************************************/
struct acpi_thread_state *
acpi_ut_create_thread_state (
void)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE ("ut_create_thread_state");
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the update struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_THREAD;
state->thread.thread_id = acpi_os_get_thread_id ();
return_PTR ((struct acpi_thread_state *) state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_update_state
*
* PARAMETERS: Object - Initial Object to be installed in the state
* Action - Update action to be performed
*
* RETURN: New state object, null on failure
*
* DESCRIPTION: Create an "Update State" - a flavor of the generic state used
* to update reference counts and delete complex objects such
* as packages.
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_update_state (
union acpi_operand_object *object,
u16 action)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE_PTR ("ut_create_update_state", object);
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the update struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_UPDATE;
state->update.object = object;
state->update.value = action;
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_pkg_state
*
* PARAMETERS: internal_object - Source object (becomes pkg.source_object)
* external_object - Destination object (becomes pkg.dest_object)
* Index - Initial package index (becomes pkg.index)
*
* RETURN: New state object, null on failure
*
* DESCRIPTION: Create a "Package State"
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_pkg_state (
void *internal_object,
void *external_object,
u16 index)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE_PTR ("ut_create_pkg_state", internal_object);
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the update struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_PACKAGE;
state->pkg.source_object = (union acpi_operand_object *) internal_object;
state->pkg.dest_object = external_object;
state->pkg.index = index;
state->pkg.num_packages = 1;
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_control_state
*
* PARAMETERS: None
*
* RETURN: New state object, null on failure
*
* DESCRIPTION: Create a "Control State" - a flavor of the generic state used
* to support nested IF/WHILE constructs in the AML.
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_control_state (
void)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE ("ut_create_control_state");
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the control struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_CONTROL;
state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_generic_state
*
* PARAMETERS: State - The state object to be deleted
*
* RETURN: None
*
* DESCRIPTION: Put a state object back into the global state cache. The object
* is not actually freed at this time.
*
******************************************************************************/
void
acpi_ut_delete_generic_state (
union acpi_generic_state *state)
{
ACPI_FUNCTION_TRACE ("ut_delete_generic_state");
acpi_ut_release_to_cache (ACPI_MEM_LIST_STATE, state);
return_VOID;
}
#ifdef ACPI_ENABLE_OBJECT_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_generic_state_cache
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Purge the global state object cache. Used during subsystem
* termination.
*
******************************************************************************/
void
acpi_ut_delete_generic_state_cache (
void)
{
ACPI_FUNCTION_TRACE ("ut_delete_generic_state_cache");
acpi_ut_delete_generic_cache (ACPI_MEM_LIST_STATE);
return_VOID;
}
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_package_tree

@@ -0,0 +1,380 @@
/*******************************************************************************
*
* Module Name: utmutex - local mutex support
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME ("utmutex")
/* Local prototypes */
static acpi_status
acpi_ut_create_mutex (
acpi_mutex_handle mutex_id);
static acpi_status
acpi_ut_delete_mutex (
acpi_mutex_handle mutex_id);
/*******************************************************************************
*
* FUNCTION: acpi_ut_mutex_initialize
*
* PARAMETERS: None.
*
* RETURN: Status
*
* DESCRIPTION: Create the system mutex objects.
*
******************************************************************************/
acpi_status
acpi_ut_mutex_initialize (
void)
{
u32 i;
acpi_status status;
ACPI_FUNCTION_TRACE ("ut_mutex_initialize");
/*
* Create each of the predefined mutex objects
*/
for (i = 0; i < NUM_MUTEX; i++) {
status = acpi_ut_create_mutex (i);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_mutex_terminate
*
* PARAMETERS: None.
*
* RETURN: None.
*
* DESCRIPTION: Delete all of the system mutex objects.
*
******************************************************************************/
void
acpi_ut_mutex_terminate (
void)
{
u32 i;
ACPI_FUNCTION_TRACE ("ut_mutex_terminate");
/*
* Delete each predefined mutex object
*/
for (i = 0; i < NUM_MUTEX; i++) {
(void) acpi_ut_delete_mutex (i);
}
acpi_os_delete_lock (acpi_gbl_gpe_lock);
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be created
*
* RETURN: Status
*
* DESCRIPTION: Create a mutex object.
*
******************************************************************************/
static acpi_status
acpi_ut_create_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_U32 ("ut_create_mutex", mutex_id);
if (mutex_id > MAX_MUTEX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
if (!acpi_gbl_mutex_info[mutex_id].mutex) {
status = acpi_os_create_semaphore (1, 1,
&acpi_gbl_mutex_info[mutex_id].mutex);
acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED;
acpi_gbl_mutex_info[mutex_id].use_count = 0;
}
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be deleted
*
* RETURN: Status
*
* DESCRIPTION: Delete a mutex object.
*
******************************************************************************/
static acpi_status
acpi_ut_delete_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status;
ACPI_FUNCTION_TRACE_U32 ("ut_delete_mutex", mutex_id);
if (mutex_id > MAX_MUTEX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
status = acpi_os_delete_semaphore (acpi_gbl_mutex_info[mutex_id].mutex);
acpi_gbl_mutex_info[mutex_id].mutex = NULL;
acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED;
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_acquire_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be acquired
*
* RETURN: Status
*
* DESCRIPTION: Acquire a mutex object.
*
******************************************************************************/
acpi_status
acpi_ut_acquire_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status;
u32 this_thread_id;
ACPI_FUNCTION_NAME ("ut_acquire_mutex");
if (mutex_id > MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
this_thread_id = acpi_os_get_thread_id ();
#ifdef ACPI_MUTEX_DEBUG
{
u32 i;
/*
* Mutex debug code, for internal debugging only.
*
* Deadlock prevention. Check if this thread owns any mutexes of value
* greater than or equal to this one. If so, the thread has violated
* the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) {
if (i == mutex_id) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Mutex [%s] already acquired by this thread [%X]\n",
acpi_ut_get_mutex_name (mutex_id), this_thread_id));
return (AE_ALREADY_ACQUIRED);
}
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Invalid acquire order: Thread %X owns [%s], wants [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (i),
acpi_ut_get_mutex_name (mutex_id)));
return (AE_ACQUIRE_DEADLOCK);
}
}
}
#endif
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX,
"Thread %X attempting to acquire Mutex [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id)));
status = acpi_os_wait_semaphore (acpi_gbl_mutex_info[mutex_id].mutex,
1, ACPI_WAIT_FOREVER);
if (ACPI_SUCCESS (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X acquired Mutex [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++;
acpi_gbl_mutex_info[mutex_id].owner_id = this_thread_id;
}
else {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Thread %X could not acquire Mutex [%s] %s\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id),
acpi_format_exception (status)));
}
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_release_mutex
*
* PARAMETERS: mutex_id - ID of the mutex to be released
*
* RETURN: Status
*
* DESCRIPTION: Release a mutex object.
*
******************************************************************************/
acpi_status
acpi_ut_release_mutex (
acpi_mutex_handle mutex_id)
{
acpi_status status;
u32 this_thread_id;
ACPI_FUNCTION_NAME ("ut_release_mutex");
this_thread_id = acpi_os_get_thread_id ();
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX,
"Thread %X releasing Mutex [%s]\n", this_thread_id,
acpi_ut_get_mutex_name (mutex_id)));
if (mutex_id > MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
/*
* Mutex must be acquired in order to release it!
*/
if (acpi_gbl_mutex_info[mutex_id].owner_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Mutex [%s] is not acquired, cannot release\n",
acpi_ut_get_mutex_name (mutex_id)));
return (AE_NOT_ACQUIRED);
}
#ifdef ACPI_MUTEX_DEBUG
{
u32 i;
/*
* Mutex debug code, for internal debugging only.
*
* Deadlock prevention. Check if this thread owns any mutexes of value
* greater than this one. If so, the thread has violated the mutex
* ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].owner_id == this_thread_id) {
if (i == mutex_id) {
continue;
}
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Invalid release order: owns [%s], releasing [%s]\n",
acpi_ut_get_mutex_name (i), acpi_ut_get_mutex_name (mutex_id)));
return (AE_RELEASE_DEADLOCK);
}
}
}
#endif
/* Mark unlocked FIRST */
acpi_gbl_mutex_info[mutex_id].owner_id = ACPI_MUTEX_NOT_ACQUIRED;
status = acpi_os_signal_semaphore (acpi_gbl_mutex_info[mutex_id].mutex, 1);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Thread %X could not release Mutex [%s] %s\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id),
acpi_format_exception (status)));
}
else {
ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Thread %X released Mutex [%s]\n",
this_thread_id, acpi_ut_get_mutex_name (mutex_id)));
}
return (status);
}
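The debug checks above enforce a strict ordering rule: a thread may only acquire mutexes in ascending mutex_id order and must release them in the reverse order. Below is a minimal sketch of a compliant caller; the two mutex IDs are illustrative, and the assumption that ACPI_MTX_NAMESPACE has the lower ID is not taken from this patch.

/* Sketch only: acquire in ascending ID order, release in reverse order */
acpi_status
example_ordered_locking (
	void)
{
	acpi_status status;

	status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE);    /* assumed lower ID */
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	status = acpi_ut_acquire_mutex (ACPI_MTX_INTERPRETER);  /* assumed higher ID */
	if (ACPI_FAILURE (status)) {
		(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
		return (status);
	}

	/* ... work while holding both mutexes ... */

	(void) acpi_ut_release_mutex (ACPI_MTX_INTERPRETER);    /* reverse order on release */
	(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
	return (AE_OK);
}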

@@ -338,7 +338,7 @@ acpi_ut_allocate_object_desc_dbg (
ACPI_FUNCTION_TRACE ("ut_allocate_object_desc_dbg");
object = acpi_ut_acquire_from_cache (ACPI_MEM_LIST_OPERAND);
object = acpi_os_acquire_object (acpi_gbl_operand_cache);
if (!object) {
_ACPI_REPORT_ERROR (module_name, line_number, component_id,
("Could not allocate an object descriptor\n"));
@@ -347,7 +347,7 @@ acpi_ut_allocate_object_desc_dbg (
}
/* Mark the descriptor type */
memset(object, 0, sizeof(union acpi_operand_object));
ACPI_SET_DESCRIPTOR_TYPE (object, ACPI_DESC_TYPE_OPERAND);
ACPI_DEBUG_PRINT ((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
@@ -385,39 +385,11 @@ acpi_ut_delete_object_desc (
return_VOID;
}
acpi_ut_release_to_cache (ACPI_MEM_LIST_OPERAND, object);
(void) acpi_os_release_object (acpi_gbl_operand_cache, object);
return_VOID;
}
#ifdef ACPI_ENABLE_OBJECT_CACHE
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_object_cache
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Purge the global state object cache. Used during subsystem
* termination.
*
******************************************************************************/
void
acpi_ut_delete_object_cache (
void)
{
ACPI_FUNCTION_TRACE ("ut_delete_object_cache");
acpi_ut_delete_generic_cache (ACPI_MEM_LIST_OPERAND);
return_VOID;
}
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_simple_object_size

@@ -0,0 +1,376 @@
/*******************************************************************************
*
* Module Name: utstate - state object support procedures
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME ("utstate")
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_pkg_state_and_push
*
* PARAMETERS: internal_object - Internal (ACPI-format) package object
* external_object - Corresponding external (user-format) object
* Index - Index within the parent package
* state_list - Stack onto which the new state is pushed
*
* RETURN: Status
*
* DESCRIPTION: Create a new state and push it
*
******************************************************************************/
acpi_status
acpi_ut_create_pkg_state_and_push (
void *internal_object,
void *external_object,
u16 index,
union acpi_generic_state **state_list)
{
union acpi_generic_state *state;
ACPI_FUNCTION_ENTRY ();
state = acpi_ut_create_pkg_state (internal_object, external_object, index);
if (!state) {
return (AE_NO_MEMORY);
}
acpi_ut_push_generic_state (state_list, state);
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_push_generic_state
*
* PARAMETERS: list_head - Head of the state stack
* State - State object to push
*
* RETURN: None
*
* DESCRIPTION: Push a state object onto a state stack
*
******************************************************************************/
void
acpi_ut_push_generic_state (
union acpi_generic_state **list_head,
union acpi_generic_state *state)
{
ACPI_FUNCTION_TRACE ("ut_push_generic_state");
/* Push the state object onto the front of the list (stack) */
state->common.next = *list_head;
*list_head = state;
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_pop_generic_state
*
* PARAMETERS: list_head - Head of the state stack
*
* RETURN: The popped state object
*
* DESCRIPTION: Pop a state object from a state stack
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_pop_generic_state (
union acpi_generic_state **list_head)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE ("ut_pop_generic_state");
/* Remove the state object at the head of the list (stack) */
state = *list_head;
if (state) {
/* Update the list head */
*list_head = state->common.next;
}
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_generic_state
*
* PARAMETERS: None
*
* RETURN: The new state object. NULL on failure.
*
* DESCRIPTION: Create a generic state object. Attempt to obtain one from
* the global state cache; If none available, create a new one.
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_generic_state (
void)
{
union acpi_generic_state *state;
ACPI_FUNCTION_ENTRY ();
state = acpi_os_acquire_object (acpi_gbl_state_cache);
if (state) {
/* Initialize */
memset(state, 0, sizeof(union acpi_generic_state));
state->common.data_type = ACPI_DESC_TYPE_STATE;
}
return (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_thread_state
*
* PARAMETERS: None
*
* RETURN: New Thread State. NULL on failure
*
* DESCRIPTION: Create a "Thread State" - a flavor of the generic state used
* to track per-thread info during method execution
*
******************************************************************************/
struct acpi_thread_state *
acpi_ut_create_thread_state (
void)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE ("ut_create_thread_state");
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the thread struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_THREAD;
state->thread.thread_id = acpi_os_get_thread_id ();
return_PTR ((struct acpi_thread_state *) state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_update_state
*
* PARAMETERS: Object - Initial Object to be installed in the state
* Action - Update action to be performed
*
* RETURN: New state object, null on failure
*
* DESCRIPTION: Create an "Update State" - a flavor of the generic state used
* to update reference counts and delete complex objects such
* as packages.
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_update_state (
union acpi_operand_object *object,
u16 action)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE_PTR ("ut_create_update_state", object);
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the update struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_UPDATE;
state->update.object = object;
state->update.value = action;
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_pkg_state
*
* PARAMETERS: internal_object - Internal (ACPI-format) package object
* external_object - Corresponding external (user-format) object
* Index - Index within the parent package
*
* RETURN: New state object, null on failure
*
* DESCRIPTION: Create a "Package State"
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_pkg_state (
void *internal_object,
void *external_object,
u16 index)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE_PTR ("ut_create_pkg_state", internal_object);
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the package struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_PACKAGE;
state->pkg.source_object = (union acpi_operand_object *) internal_object;
state->pkg.dest_object = external_object;
state->pkg.index = index;
state->pkg.num_packages = 1;
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_control_state
*
* PARAMETERS: None
*
* RETURN: New state object, null on failure
*
* DESCRIPTION: Create a "Control State" - a flavor of the generic state used
* to support nested IF/WHILE constructs in the AML.
*
******************************************************************************/
union acpi_generic_state *
acpi_ut_create_control_state (
void)
{
union acpi_generic_state *state;
ACPI_FUNCTION_TRACE ("ut_create_control_state");
/* Create the generic state object */
state = acpi_ut_create_generic_state ();
if (!state) {
return_PTR (NULL);
}
/* Init fields specific to the control struct */
state->common.data_type = ACPI_DESC_TYPE_STATE_CONTROL;
state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
return_PTR (state);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_generic_state
*
* PARAMETERS: State - The state object to be deleted
*
* RETURN: None
*
* DESCRIPTION: Put a state object back into the global state cache. The object
* is not actually freed at this time.
*
******************************************************************************/
void
acpi_ut_delete_generic_state (
union acpi_generic_state *state)
{
ACPI_FUNCTION_TRACE ("ut_delete_generic_state");
(void) acpi_os_release_object (acpi_gbl_state_cache, state);
return_VOID;
}
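Taken together, the routines above implement a simple LIFO of cached state objects. A minimal usage sketch follows; the operand object and the REF_INCREMENT action are placeholders for whatever the caller is actually tracking.

/* Sketch: build a one-deep stack of update states, then unwind and recycle it */
static acpi_status
example_state_stack (
	union acpi_operand_object *obj_desc)
{
	union acpi_generic_state *state_list = NULL;
	union acpi_generic_state *state;

	state = acpi_ut_create_update_state (obj_desc, REF_INCREMENT);
	if (!state) {
		return (AE_NO_MEMORY);
	}
	acpi_ut_push_generic_state (&state_list, state);

	while ((state = acpi_ut_pop_generic_state (&state_list)) != NULL) {
		/* ... act on state->update.object per state->update.value ... */

		/* Return the state object to acpi_gbl_state_cache */
		acpi_ut_delete_generic_state (state);
	}
	return (AE_OK);
}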

@@ -46,8 +46,6 @@
#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>
#include <acpi/acparser.h>
#include <acpi/acdispat.h>
#include <acpi/acdebug.h>
#define _COMPONENT ACPI_UTILITIES
@@ -79,11 +77,6 @@ acpi_initialize_subsystem (
ACPI_DEBUG_EXEC (acpi_ut_init_stack_ptr_trace ());
/* Initialize all globals used by the subsystem */
acpi_ut_init_globals ();
/* Initialize the OS-Dependent layer */
status = acpi_os_initialize ();
@@ -93,6 +86,10 @@ acpi_initialize_subsystem (
return_ACPI_STATUS (status);
}
/* Initialize all globals used by the subsystem */
acpi_ut_init_globals ();
/* Create the default mutex objects */
status = acpi_ut_mutex_initialize ();
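The net effect of the two hunks above is that acpi_os_initialize now runs before acpi_ut_init_globals, so the global initialization can call OSL interfaces (presumably cache creation through acpi_ut_create_caches; that call path is an assumption, not shown in this patch). A sketch of the resulting sequence, with error paths elided:

/* Sketch of the resulting order in acpi_initialize_subsystem (error handling elided) */
ACPI_DEBUG_EXEC (acpi_ut_init_stack_ptr_trace ());

status = acpi_os_initialize ();       /* OSL first, so that ...                      */
acpi_ut_init_globals ();              /* ... global init may call OSL interfaces
                                       * (cache creation is the assumed reason)      */
status = acpi_ut_mutex_initialize (); /* default mutex objects, unchanged            */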
@@ -522,13 +519,9 @@ acpi_purge_cached_objects (
{
ACPI_FUNCTION_TRACE ("acpi_purge_cached_objects");
#ifdef ACPI_ENABLE_OBJECT_CACHE
acpi_ut_delete_generic_state_cache ();
acpi_ut_delete_object_cache ();
acpi_ds_delete_walk_state_cache ();
acpi_ps_delete_parse_cache ();
#endif
(void) acpi_os_purge_cache (acpi_gbl_state_cache);
(void) acpi_os_purge_cache (acpi_gbl_operand_cache);
(void) acpi_os_purge_cache (acpi_gbl_ps_node_cache);
(void) acpi_os_purge_cache (acpi_gbl_ps_node_ext_cache);
return_ACPI_STATUS (AE_OK);
}

@@ -64,7 +64,7 @@
/* Version string */
#define ACPI_CA_VERSION 0x20050526
#define ACPI_CA_VERSION 0x20050624
/*
* OS name, used for the _OS object. The _OS object is essentially obsolete,
@@ -78,11 +78,10 @@
/* Maximum objects in the various object caches */
#define ACPI_MAX_STATE_CACHE_DEPTH 64 /* State objects */
#define ACPI_MAX_STATE_CACHE_DEPTH 96 /* State objects */
#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 64 /* Parse tree objects */
#define ACPI_MAX_OBJECT_CACHE_DEPTH 64 /* Interpreter operand objects */
#define ACPI_MAX_WALK_CACHE_DEPTH 4 /* Objects for parse tree walks */
#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */
/*
* Should the subsystem abort the loading of an ACPI table if the

@@ -113,6 +113,10 @@ void
acpi_db_set_method_call_breakpoint (
union acpi_parse_object *op);
void
acpi_db_get_bus_info (
void);
void
acpi_db_disassemble_aml (
char *statements,
@@ -327,7 +331,7 @@ acpi_db_set_output_destination (
u32 where);
void
acpi_db_dump_object (
acpi_db_dump_external_object (
union acpi_object *obj_desc,
u32 level);

@@ -90,6 +90,7 @@ struct acpi_op_walk_info
{
u32 level;
u32 bit_offset;
struct acpi_walk_state *walk_state;
};
typedef

@@ -450,10 +450,4 @@ acpi_ds_result_pop_from_bottom (
union acpi_operand_object **object,
struct acpi_walk_state *walk_state);
#ifdef ACPI_ENABLE_OBJECT_CACHE
void
acpi_ds_delete_walk_state_cache (
void);
#endif
#endif /* _ACDISPAT_H_ */

@@ -122,8 +122,7 @@ acpi_ev_valid_gpe_event (
acpi_status
acpi_ev_walk_gpe_list (
ACPI_GPE_CALLBACK gpe_walk_callback,
u32 flags);
ACPI_GPE_CALLBACK gpe_walk_callback);
acpi_status
acpi_ev_delete_gpe_handlers (

@@ -151,6 +151,13 @@ ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS;
*/
/* The root table can be either an RSDT or an XSDT */
ACPI_EXTERN u8 acpi_gbl_root_table_type;
#define ACPI_TABLE_TYPE_RSDT 'R'
#define ACPI_TABLE_TYPE_XSDT 'X'
/*
* Handle both ACPI 1.0 and ACPI 2.0 Integer widths:
* If we are executing a method that exists in a 32-bit ACPI table,
@@ -180,8 +187,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[NUM_MUTEX];
*
****************************************************************************/
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
/* Lists for tracking memory allocations */
ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
#endif
/* Object caches */
ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
/* Global handlers */
ACPI_EXTERN struct acpi_memory_list acpi_gbl_memory_lists[ACPI_NUM_MEM_LISTS];
ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
@@ -189,6 +211,8 @@ ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore;
/* Misc */
ACPI_EXTERN u32 acpi_gbl_global_lock_thread_count;
ACPI_EXTERN u32 acpi_gbl_original_mode;
ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;

@@ -143,15 +143,15 @@ acpi_hw_get_gpe_status (
acpi_status
acpi_hw_disable_all_gpes (
u32 flags);
void);
acpi_status
acpi_hw_enable_all_runtime_gpes (
u32 flags);
void);
acpi_status
acpi_hw_enable_all_wakeup_gpes (
u32 flags);
void);
acpi_status
acpi_hw_enable_runtime_gpe_block (

@@ -953,24 +953,18 @@ struct acpi_debug_mem_block
#define ACPI_MEM_LIST_GLOBAL 0
#define ACPI_MEM_LIST_NSNODE 1
#define ACPI_MEM_LIST_FIRST_CACHE_LIST 2
#define ACPI_MEM_LIST_STATE 2
#define ACPI_MEM_LIST_PSNODE 3
#define ACPI_MEM_LIST_PSNODE_EXT 4
#define ACPI_MEM_LIST_OPERAND 5
#define ACPI_MEM_LIST_WALK 6
#define ACPI_MEM_LIST_MAX 6
#define ACPI_NUM_MEM_LISTS 7
#define ACPI_MEM_LIST_MAX 1
#define ACPI_NUM_MEM_LISTS 2
struct acpi_memory_list
{
char *list_name;
void *list_head;
u16 link_offset;
u16 max_cache_depth;
u16 cache_depth;
u16 object_size;
u16 max_depth;
u16 current_depth;
u16 link_offset;
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
@@ -979,11 +973,9 @@ struct acpi_memory_list
u32 total_allocated;
u32 total_freed;
u32 current_total_size;
u32 cache_requests;
u32 cache_hits;
char *list_name;
u32 requests;
u32 hits;
#endif
};
#endif /* __ACLOCAL_H__ */

@@ -63,6 +63,7 @@
#define ACPI_PARSE_MODE_MASK 0x0030
#define ACPI_PARSE_DEFERRED_OP 0x0100
#define ACPI_PARSE_DISASSEMBLE 0x0200
/******************************************************************************
@@ -158,6 +159,25 @@ u16
acpi_ps_peek_opcode (
struct acpi_parse_state *state);
acpi_status
acpi_ps_complete_this_op (
struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
acpi_status
acpi_ps_next_parse_state (
struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
acpi_status callback_status);
/*
* psloop - main parse loop
*/
acpi_status
acpi_ps_parse_loop (
struct acpi_walk_state *walk_state);
/*
* psscope - Scope stack management routines
@@ -291,12 +311,6 @@ acpi_ps_set_name(
union acpi_parse_object *op,
u32 name);
#ifdef ACPI_ENABLE_OBJECT_CACHE
void
acpi_ps_delete_parse_cache (
void);
#endif
/*
* psdump - display parser tree

@@ -139,15 +139,14 @@ void
acpi_os_delete_lock (
acpi_handle handle);
void
unsigned long
acpi_os_acquire_lock (
acpi_handle handle,
u32 flags);
acpi_handle handle);
void
acpi_os_release_lock (
acpi_handle handle,
u32 flags);
unsigned long flags);
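acpi_os_acquire_lock now returns the saved flags and acpi_os_release_lock takes them back. Below is a minimal sketch of how a Linux OSL might implement the pair, assuming the acpi_handle simply wraps a spinlock; this is an illustration, not the actual osl.c change.

/* Sketch only: flags-based lock interfaces over a spinlock (assumed handle type) */
unsigned long
acpi_os_acquire_lock (
	acpi_handle handle)
{
	unsigned long flags;

	spin_lock_irqsave ((spinlock_t *) handle, flags);   /* save local interrupt state */
	return (flags);                                      /* caller hands it to the release */
}

void
acpi_os_release_lock (
	acpi_handle handle,
	unsigned long flags)
{
	spin_unlock_irqrestore ((spinlock_t *) handle, flags);
}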
/*
@@ -180,6 +179,34 @@ acpi_os_get_physical_address (
#endif
/*
* Memory/Object Cache
*/
acpi_status
acpi_os_create_cache (
char *cache_name,
u16 object_size,
u16 max_depth,
acpi_cache_t **return_cache);
acpi_status
acpi_os_delete_cache (
acpi_cache_t *cache);
acpi_status
acpi_os_purge_cache (
acpi_cache_t *cache);
void *
acpi_os_acquire_object (
acpi_cache_t *cache);
acpi_status
acpi_os_release_object (
acpi_cache_t *cache,
void *object);
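A hedged sketch of driving the cache interfaces declared above from end to end; the cache name, object type, and depth mirror the state cache declared elsewhere in this patch, but this exact call site is an illustration, not code from the patch.

/* Sketch: create, exercise, and tear down one object cache */
static acpi_status
example_cache_round_trip (
	void)
{
	acpi_cache_t *cache;
	void *object;
	acpi_status status;

	status = acpi_os_create_cache ("acpi_state", sizeof (union acpi_generic_state),
			 ACPI_MAX_STATE_CACHE_DEPTH, &cache);
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	object = acpi_os_acquire_object (cache);   /* object returned directly, NULL on failure */
	if (object) {
		(void) acpi_os_release_object (cache, object);
	}

	(void) acpi_os_purge_cache (cache);        /* free all currently cached objects */
	return (acpi_os_delete_cache (cache));     /* then destroy the cache itself */
}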
/*
* Interrupt handlers
*/

@@ -162,6 +162,9 @@ struct acpi_walk_info
#define ACPI_DISPLAY_SUMMARY 0
#define ACPI_DISPLAY_OBJECTS 1
#define ACPI_DISPLAY_MASK 1
#define ACPI_DISPLAY_SHORT 2
struct acpi_get_devices_info
{

@@ -243,6 +243,11 @@ struct acpi_pointer
#define ACPI_LOGMODE_PHYSPTR ACPI_LOGICAL_ADDRESSING | ACPI_PHYSICAL_POINTER
#define ACPI_LOGMODE_LOGPTR ACPI_LOGICAL_ADDRESSING | ACPI_LOGICAL_POINTER
/* Types for the OS interface layer (OSL) */
#ifdef ACPI_USE_LOCAL_CACHE
#define acpi_cache_t struct acpi_memory_list
#endif
/*
* Useful defines

@@ -557,16 +557,6 @@ void
acpi_ut_delete_generic_state (
union acpi_generic_state *state);
#ifdef ACPI_ENABLE_OBJECT_CACHE
void
acpi_ut_delete_generic_state_cache (
void);
void
acpi_ut_delete_object_cache (
void);
#endif
/*
* utmath
@@ -622,22 +612,6 @@ acpi_ut_strtoul64 (
#define ACPI_ANY_BASE 0
acpi_status
acpi_ut_mutex_initialize (
void);
void
acpi_ut_mutex_terminate (
void);
acpi_status
acpi_ut_acquire_mutex (
acpi_mutex_handle mutex_id);
acpi_status
acpi_ut_release_mutex (
acpi_mutex_handle mutex_id);
u8 *
acpi_ut_get_resource_end_tag (
union acpi_operand_object *obj_desc);
@@ -665,23 +639,36 @@ acpi_ut_display_init_pathname (
#endif
/*
* utmutex - mutex support
*/
acpi_status
acpi_ut_mutex_initialize (
void);
void
acpi_ut_mutex_terminate (
void);
acpi_status
acpi_ut_acquire_mutex (
acpi_mutex_handle mutex_id);
acpi_status
acpi_ut_release_mutex (
acpi_mutex_handle mutex_id);
/*
* utalloc - memory allocation and object caching
*/
void *
acpi_ut_acquire_from_cache (
u32 list_id);
acpi_status
acpi_ut_create_caches (
void);
void
acpi_ut_release_to_cache (
u32 list_id,
void *object);
#ifdef ACPI_ENABLE_OBJECT_CACHE
void
acpi_ut_delete_generic_cache (
u32 list_id);
#endif
acpi_status
acpi_ut_delete_caches (
void);
acpi_status
acpi_ut_validate_buffer (

@@ -69,7 +69,7 @@
#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f
#define AML_NAME_CHAR_SUBSEQ (u16) 0x30
#define AML_NAME_CHAR_FIRST (u16) 0x41
#define AML_OP_PREFIX (u16) 0x5b
#define AML_EXTENDED_OP_PREFIX (u16) 0x5b
#define AML_ROOT_PREFIX (u16) 0x5c
#define AML_PARENT_PREFIX (u16) 0x5e
#define AML_LOCAL_OP (u16) 0x60
@@ -146,7 +146,7 @@
/* prefixed opcodes */
#define AML_EXTOP (u16) 0x005b /* prefix for 2-byte opcodes */
#define AML_EXTENDED_OPCODE (u16) 0x5b00 /* prefix for 2-byte opcodes */
#define AML_MUTEX_OP (u16) 0x5b01
#define AML_EVENT_OP (u16) 0x5b02

@@ -49,35 +49,38 @@
* Configuration for ACPI tools and utilities
*/
#ifdef _ACPI_DUMP_APP
#ifdef ACPI_LIBRARY
#define ACPI_USE_LOCAL_CACHE
#endif
#ifdef ACPI_DUMP_APP
#ifndef MSDOS
#define ACPI_DEBUG_OUTPUT
#endif
#define ACPI_APPLICATION
#define ACPI_DISASSEMBLER
#define ACPI_NO_METHOD_EXECUTION
#define ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_ENABLE_OBJECT_CACHE
#endif
#ifdef _ACPI_EXEC_APP
#ifdef ACPI_EXEC_APP
#undef DEBUGGER_THREADING
#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED
#define ACPI_DEBUG_OUTPUT
#define ACPI_APPLICATION
#define ACPI_DEBUGGER
#define ACPI_DISASSEMBLER
#define ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_ENABLE_OBJECT_CACHE
#endif
#ifdef _ACPI_ASL_COMPILER
#ifdef ACPI_ASL_COMPILER
#define ACPI_DEBUG_OUTPUT
#define ACPI_APPLICATION
#define ACPI_DISASSEMBLER
#define ACPI_CONSTANT_EVAL_ONLY
#endif
#ifdef ACPI_APPLICATION
#define ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_ENABLE_OBJECT_CACHE
#define ACPI_USE_LOCAL_CACHE
#endif
/*

@@ -62,6 +62,17 @@
#define ACPI_MACHINE_WIDTH BITS_PER_LONG
/* Type(s) for the OSL */
#ifdef ACPI_USE_LOCAL_CACHE
#define acpi_cache_t struct acpi_memory_list
#else
#include <linux/slab.h>
#define acpi_cache_t kmem_cache_t
#endif
#else /* !__KERNEL__ */
#include <stdarg.h>
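With these definitions the core code compiles against either backend: under ACPI_USE_LOCAL_CACHE the OSL cache calls resolve to the local acpi_memory_list implementation, otherwise a Linux OSL can forward them to the slab allocator. A hedged sketch of that forwarding follows; it is not the actual osl.c code, and the kmem_cache_* mapping and GFP_KERNEL choice are assumptions.

/* Sketch only: forwarding the OSL cache interfaces to the Linux slab allocator */
acpi_status
acpi_os_create_cache (
	char *cache_name,
	u16 object_size,
	u16 max_depth,
	acpi_cache_t **return_cache)
{
	/* max_depth is ignored here; the slab allocator manages its own depth */
	*return_cache = kmem_cache_create (cache_name, object_size, 0, 0, NULL, NULL);
	return (*return_cache) ? AE_OK : AE_ERROR;
}

void *
acpi_os_acquire_object (
	acpi_cache_t *cache)
{
	/* Callers zero the object themselves (see utstate.c/utobject.c above) */
	return (kmem_cache_alloc (cache, GFP_KERNEL));
}

acpi_status
acpi_os_release_object (
	acpi_cache_t *cache,
	void *object)
{
	kmem_cache_free (cache, object);
	return (AE_OK);
}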