* If this is the last thread executing the method,
* we have additional cleanup to perform
*/
- status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
if (ACPI_FAILURE(status)) {
return_VOID;
}
}
exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
+ (void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
return_VOID;
}
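The hunk above retargets method-termination cleanup from the retired parser mutex to the new ACPI_MTX_CONTROL_METHOD mutex. A minimal standalone sketch of the same "last thread out performs the cleanup under a lock" pattern, using POSIX threads instead of the ACPICA mutex layer (all names below are illustrative):

#include <pthread.h>

static pthread_mutex_t method_lock = PTHREAD_MUTEX_INITIALIZER;
static int thread_count;	/* threads currently executing the method */

static void method_exit(void)
{
	/* Serialize termination so only one thread runs the cleanup */
	if (pthread_mutex_lock(&method_lock) != 0)
		return;		/* mirrors the return_VOID on failure */

	if (--thread_count == 0) {
		/* Last thread out: release per-method resources here */
	}

	pthread_mutex_unlock(&method_lock);
}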
u32 status_reg;
u32 enable_reg;
acpi_cpu_flags flags;
+ acpi_cpu_flags hw_flags;
acpi_native_uint i;
acpi_native_uint j;
return (int_status);
}
- /* Examine all GPE blocks attached to this interrupt level */
+ /* We need to hold the GPE lock now, hardware lock in the loop */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Examine all GPE blocks attached to this interrupt level */
+
gpe_block = gpe_xrupt_list->gpe_block_list_head;
while (gpe_block) {
/*
gpe_register_info = &gpe_block->register_info[i];
+ hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
/* Read the Status Register */
status =
    acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
    &status_reg,
    &gpe_register_info->
    status_address);
if (ACPI_FAILURE(status)) {
+ acpi_os_release_lock(acpi_gbl_hardware_lock,
+ hw_flags);
goto unlock_and_exit;
}
/* Read the Enable Register */

status =
    acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
    &enable_reg,
    &gpe_register_info->
    enable_address);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
+
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
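This hunk narrows the locking scope: the GPE lock stays held across the whole walk, while the new hardware lock is taken and dropped around each pair of register reads, and is explicitly released on the error path before jumping to unlock_and_exit. A standalone sketch of that nested, fine-grained pattern, with pthread mutexes standing in for the ACPI spinlocks and illustrative names throughout:

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

struct gpe_register { unsigned status, enable; };

/* Stand-in for the low-level register read; returns nonzero on failure */
static int hw_read(unsigned *value, const unsigned *address)
{
	*value = *address;	/* pretend this is an MMIO access */
	return 0;
}

static int scan_registers(struct gpe_register *regs, int count)
{
	int status = 0;
	int i;

	pthread_mutex_lock(&list_lock);	/* held for the entire walk */

	for (i = 0; i < count; i++) {
		unsigned status_reg, enable_reg;

		/* Hardware lock only around the actual register access */
		pthread_mutex_lock(&hw_lock);

		status = hw_read(&status_reg, &regs[i].status);
		if (status) {
			pthread_mutex_unlock(&hw_lock);
			goto unlock_and_exit;
		}

		status = hw_read(&enable_reg, &regs[i].enable);
		pthread_mutex_unlock(&hw_lock);
		if (status)
			goto unlock_and_exit;
	}

unlock_and_exit:
	pthread_mutex_unlock(&list_lock);
	return status;
}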
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = (void *)context;
- u32 gpe_number = 0;
acpi_status status;
struct acpi_gpe_event_info local_gpe_event_info;
struct acpi_evaluate_info *info;
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "While evaluating method [%4.4s] for GPE[%2X]",
+ "While evaluating GPE method [%4.4s]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
- method_node), gpe_number));
+ method_node)));
}
}
{
acpi_status status;
acpi_integer mask;
+ acpi_integer width_mask;
acpi_integer merged_datum;
acpi_integer raw_datum = 0;
u32 field_offset = 0;
/* Compute the number of datums (access width data items) */
+ width_mask =
+ ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width);
mask =
- ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
+ width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field.
+ start_field_bit_offset);
datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
obj_desc->common_field.access_bit_width);
merged_datum = raw_datum >>
(obj_desc->common_field.access_bit_width -
obj_desc->common_field.start_field_bit_offset);
- mask = ACPI_INTEGER_MAX;
+ mask = width_mask;
if (i == datum_count) {
break;
/* Since the bit position is one-based, subtract from 33 (65) */
- return_desc->integer.value = temp32 == 0 ? 0 :
- (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
+ return_desc->integer.value =
+ temp32 ==
+ 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
break;
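The "subtract from 33 (65)" arithmetic corresponds to the FindSetRightBit operator: the loop left-shifts the value until it becomes zero, so temp32 ends up as BITS - p, where p is the zero-based index of the lowest set bit, and (BITS + 1) - temp32 yields the one-based position, with 0 reserved for a zero input. A standalone check of that arithmetic for the 32-bit case (illustrative, not the interpreter code itself):

#include <assert.h>
#include <stdint.h>

/* One-based position of the lowest set bit; 0 if no bit is set */
static unsigned find_set_right_bit(uint32_t value)
{
	unsigned temp32;

	/* Shift left until the value is clear, as the interpreter does */
	for (temp32 = 0; value && temp32 < 32; ++temp32)
		value <<= 1;

	/* Since the bit position is one-based, subtract from 33 */
	return temp32 == 0 ? 0 : 33 - temp32;
}

int main(void)
{
	assert(find_set_right_bit(0) == 0);
	assert(find_set_right_bit(1) == 1);	/* bit 0 -> position 1 */
	assert(find_set_right_bit(0x8) == 4);	/* bit 3 -> position 4 */
	assert(find_set_right_bit(0x80000000u) == 32);
	return 0;
}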
case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */
if (ACPI_FAILURE(status)) {
goto cleanup;
}
+
/* Allocate a descriptor to hold the type. */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
acpi_ut_add_reference
(return_desc);
}
-
break;
default:
if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
ACPI_DESC_TYPE_NAMED) {
-
return_desc =
acpi_ns_get_attached_object((struct
acpi_namespace_node
default:
ACPI_ERROR((AE_INFO,
- "Unknown opcode in ref(%p) - %X",
+ "Unknown opcode in reference(%p) - %X",
operand[0],
operand[0]->reference.opcode));
/*
* Several object types require no further processing:
- * 1) Devices rarely have an attached object, return the Node
+ * 1) Device/Thermal objects don't have a "real" subobject; return the Node
* 2) Method locals and arguments have a pseudo-Node
*/
- if (entry_type == ACPI_TYPE_DEVICE ||
+ if ((entry_type == ACPI_TYPE_DEVICE) ||
+ (entry_type == ACPI_TYPE_THERMAL) ||
(node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
return_ACPI_STATUS(AE_OK);
}
case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
case AML_INT_NAMEPATH_OP: /* Reference to a named object */
- /* Get the object pointed to by the namespace node */
+ /* Dereference the name */
+
+ if ((stack_desc->reference.node->type ==
+ ACPI_TYPE_DEVICE)
+ || (stack_desc->reference.node->type ==
+ ACPI_TYPE_THERMAL)) {
+
+ /* These node types do not have 'real' subobjects */
+
+ *stack_ptr = (void *)stack_desc->reference.node;
+ } else {
+ /* Get the object pointed to by the namespace node */
+
+ *stack_ptr =
+ (stack_desc->reference.node)->object;
+ acpi_ut_add_reference(*stack_ptr);
+ }
- *stack_ptr = (stack_desc->reference.node)->object;
- acpi_ut_add_reference(*stack_ptr);
acpi_ut_remove_reference(stack_desc);
break;
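The new branch returns the namespace node itself for Device and Thermal objects, which have no "real" subobject to dereference, and bumps the reference count only when an attached object is actually handed back. A standalone sketch of that conditional refcounting discipline (simplified types; all names are illustrative):

enum node_type { TYPE_DEVICE, TYPE_THERMAL, TYPE_INTEGER };

struct object {
	int ref_count;
};

struct node {
	enum node_type type;
	struct object *object;	/* attached subobject, if any */
};

/*
 * Return the node itself for types with no subobject; otherwise
 * return the attached object and take a reference on it.
 */
static void *dereference(struct node *node)
{
	if (node->type == TYPE_DEVICE || node->type == TYPE_THERMAL)
		return node;	/* no 'real' subobject */

	node->object->ref_count++;	/* caller now owns a reference */
	return node->object;
}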
ACPI_FUNCTION_TRACE(ex_enter_interpreter);
- status = acpi_ut_acquire_mutex(ACPI_MTX_EXECUTE);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
}
ACPI_FUNCTION_TRACE(ex_exit_interpreter);
- status = acpi_ut_release_mutex(ACPI_MTX_EXECUTE);
+ status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
}
* DESCRIPTION: Clears all fixed and general purpose status bits
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
acpi_status acpi_hw_clear_acpi_status(u32 flags)
{
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
ACPI_BITMASK_ALL_FIXED_STATUS,
(u16) acpi_gbl_FADT->xpm1a_evt_blk.address));
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS,
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
unlock_and_exit:
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
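With this conversion, hardware register access is serialized by an interrupt-safe spinlock rather than a mutex: acpi_os_acquire_lock() returns the saved CPU/interrupt flags, which must be passed back to acpi_os_release_lock(). A minimal sketch of that calling convention, assuming the ACPICA OSL declarations of this era (the lock variable and function below are illustrative):

#include <acpi/acpi.h>	/* acpi_os_acquire_lock/acpi_os_release_lock */

extern acpi_handle my_hw_lock;	/* created via acpi_os_create_lock() */

static void touch_hardware(void)
{
	acpi_cpu_flags lock_flags;

	/* Disables interrupts and spins; usable from interrupt context */
	lock_flags = acpi_os_acquire_lock(my_hw_lock);

	/* ... read/modify/write hardware registers here ... */

	/* Restores the interrupt state saved at acquire time */
	acpi_os_release_lock(my_hw_lock, lock_flags);
}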
*
* DESCRIPTION: ACPI bit_register read function.
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
/* Read from the register */
- status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
+ status = acpi_hw_register_read(ACPI_MTX_LOCK,
bit_reg_info->parent_register,
®ister_value);
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
-
if (ACPI_SUCCESS(status)) {
/* Normalize the value that was read */
*
* DESCRIPTION: ACPI Bit Register write function.
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
{
u32 register_value = 0;
struct acpi_bit_register_info *bit_reg_info;
acpi_status status;
+ acpi_cpu_flags lock_flags;
ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* Always do a register read first so we can insert the new bits */
unlock_and_exit:
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
/* Normalize the value that was read */
u32 value1 = 0;
u32 value2 = 0;
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE(hw_register_read);
if (ACPI_MTX_LOCK == use_lock) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
if (ACPI_SUCCESS(status)) {
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
{
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE(hw_register_write);
if (ACPI_MTX_LOCK == use_lock) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
return_ACPI_STATUS(status);
ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
scope_name));
ACPI_FREE(scope_name);
- status = AE_OK;
}
#endif
+ /* Ignore errors from above */
+
+ status = AE_OK;
+
/*
* The _INI method has been run if present; call the Global Initialization
* Handler for this device.
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
- if (in_atomic())
- timeout = 0;
-
switch (timeout) {
/*
* No Wait:
ACPI_MOVE_32_TO_32(&signature, table_header->signature);
if (!acpi_ut_valid_acpi_name(signature)) {
- ACPI_ERROR((AE_INFO,
- "Table signature at %p [%p] has invalid characters",
- table_header, &signature));
-
- ACPI_WARNING((AE_INFO, "Invalid table signature found: [%4.4s]",
- ACPI_CAST_PTR(char, &signature)));
+ ACPI_ERROR((AE_INFO, "Invalid table signature 0x%8.8X",
+ signature));
ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header));
if (table_header->length < sizeof(struct acpi_table_header)) {
ACPI_ERROR((AE_INFO,
- "Invalid length in table header %p name %4.4s",
- table_header, (char *)&signature));
-
- ACPI_WARNING((AE_INFO,
- "Invalid table header length (0x%X) found",
- (u32) table_header->length));
+ "Invalid length 0x%X in table with signature %4.4s",
+ (u32) table_header->length,
+ ACPI_CAST_PTR(char, &signature)));
ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header));
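The consolidated messages above report two header sanity checks: the 4-character signature must consist of valid ACPI name characters, and the declared length must at least cover the standard table header. A standalone sketch of those two checks (struct layout abbreviated; helper names are illustrative):

#include <ctype.h>
#include <stdint.h>

struct table_header {
	char signature[4];
	uint32_t length;
	/* ... remaining standard ACPI header fields ... */
};

static int valid_acpi_char(char c)
{
	/* ACPI names use uppercase letters, digits, and underscore */
	return isupper((unsigned char)c) || isdigit((unsigned char)c) ||
	    c == '_';
}

static int verify_table_header(const struct table_header *hdr)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (!valid_acpi_char(hdr->signature[i]))
			return -1;	/* invalid table signature */
	}

	if (hdr->length < sizeof(struct table_header))
		return -1;	/* invalid length in table header */

	return 0;
}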
char *acpi_ut_get_mutex_name(u32 mutex_id)
{
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return ("Invalid Mutex ID");
}
/* Mutex locked flags */
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
acpi_gbl_mutex_info[i].mutex = NULL;
acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
acpi_gbl_mutex_info[i].use_count = 0;
/*
* Create each of the predefined mutex objects
*/
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
status = acpi_ut_create_mutex(i);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
+ /* Create the spinlocks for use at interrupt level */
+
status = acpi_os_create_lock(&acpi_gbl_gpe_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_os_create_lock(&acpi_gbl_hardware_lock);
return_ACPI_STATUS(status);
}
/*
* Delete each predefined mutex object
*/
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
(void)acpi_ut_delete_mutex(i);
}
+ /* Delete the spinlocks */
+
acpi_os_delete_lock(acpi_gbl_gpe_lock);
+ acpi_os_delete_lock(acpi_gbl_hardware_lock);
return_VOID;
}
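Initialization now creates the fixed set of mutexes plus the two interrupt-level spinlocks, and termination tears both down again. A standalone sketch of that paired create/delete lifecycle using pthreads (the count and lock names are stand-ins for the ACPI globals):

#include <pthread.h>

#define NUM_MUTEX 9		/* stand-in for ACPI_NUM_MUTEX */

static pthread_mutex_t mutexes[NUM_MUTEX];
static pthread_spinlock_t gpe_lock, hardware_lock;

static int locks_initialize(void)
{
	int i;

	for (i = 0; i < NUM_MUTEX; i++) {
		if (pthread_mutex_init(&mutexes[i], NULL) != 0)
			return -1;
	}

	/* Spinlocks for use at interrupt level in the real code */
	if (pthread_spin_init(&gpe_lock, PTHREAD_PROCESS_PRIVATE) != 0)
		return -1;

	return pthread_spin_init(&hardware_lock, PTHREAD_PROCESS_PRIVATE);
}

static void locks_terminate(void)
{
	int i;

	for (i = 0; i < NUM_MUTEX; i++)
		(void)pthread_mutex_destroy(&mutexes[i]);

	pthread_spin_destroy(&gpe_lock);
	pthread_spin_destroy(&hardware_lock);
}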
ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
ACPI_FUNCTION_NAME(ut_acquire_mutex);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
* the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
- for (i = mutex_id; i < MAX_MUTEX; i++) {
+ for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
ACPI_ERROR((AE_INFO,
"Thread %X releasing Mutex [%s]\n", this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
* ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
- for (i = mutex_id; i < MAX_MUTEX; i++) {
+ for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
continue;
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20060526
+#define ACPI_CA_VERSION 0x20060608
/*
* OS name, used for the _OS object. The _OS object is essentially obsolete,
#define ACPI_MAX_ADDRESS_SPACE 255
/* Array sizes. Used for range checking also */
-#define ACPI_MAX_MATCH_OPCODE 5
-#if 0
-#define ACPI_NUM_ACCESS_TYPES 6
-#define ACPI_NUM_UPDATE_RULES 3
-#define ACPI_NUM_LOCK_RULES 2
-#define ACPI_NUM_FIELD_NAMES 2
-#define ACPI_NUM_OPCODES 256
-#endif
+#define ACPI_MAX_MATCH_OPCODE 5
/* RSDP checksums */
* actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
* (The table maps local handles to the real OS handles)
*/
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[NUM_MUTEX];
+ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*****************************************************************************
*
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+
+/* Spinlocks */
+
ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock;
+ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock;
/*****************************************************************************
*
* Predefined handles for the mutex objects used within the subsystem
* All mutex objects are automatically created by acpi_ut_mutex_initialize.
*
- * The acquire/release ordering protocol is implied via this list. Mutexes
+ * The acquire/release ordering protocol is implied via this list. Mutexes
* with a lower value must be acquired before mutexes with a higher value.
*
- * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names table also!
+ * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names
+ * table below also!
*/
-#define ACPI_MTX_EXECUTE 0
-#define ACPI_MTX_INTERPRETER 1
-#define ACPI_MTX_PARSER 2
-#define ACPI_MTX_DISPATCHER 3
-#define ACPI_MTX_TABLES 4
-#define ACPI_MTX_OP_REGIONS 5
-#define ACPI_MTX_NAMESPACE 6
-#define ACPI_MTX_EVENTS 7
-#define ACPI_MTX_HARDWARE 8
-#define ACPI_MTX_CACHES 9
-#define ACPI_MTX_MEMORY 10
-#define ACPI_MTX_DEBUG_CMD_COMPLETE 11
-#define ACPI_MTX_DEBUG_CMD_READY 12
-
-#define MAX_MUTEX 12
-#define NUM_MUTEX MAX_MUTEX+1
+#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */
+#define ACPI_MTX_CONTROL_METHOD 1 /* Control method termination [TBD: may no longer be necessary] */
+#define ACPI_MTX_TABLES 2 /* Data for ACPI tables */
+#define ACPI_MTX_NAMESPACE 3 /* ACPI Namespace */
+#define ACPI_MTX_EVENTS 4 /* Data for ACPI events */
+#define ACPI_MTX_CACHES 5 /* Internal caches, general purposes */
+#define ACPI_MTX_MEMORY 6 /* Debug memory tracking lists */
+#define ACPI_MTX_DEBUG_CMD_COMPLETE 7 /* AML debugger */
+#define ACPI_MTX_DEBUG_CMD_READY 8 /* AML debugger */
+
+#define ACPI_MAX_MUTEX 8
+#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
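The ordering protocol stated above (lower-numbered mutexes must be acquired first) is what the acquire path enforces when it scans acpi_gbl_mutex_info from mutex_id upward for locks the current thread already holds. A standalone sketch of such a deadlock-order check using per-thread bookkeeping (illustrative names; the real code records thread IDs in a global table instead):

#include <stdio.h>

#define NUM_MUTEX 9		/* stand-in for ACPI_NUM_MUTEX */

/* One flag per mutex ID, set while the current thread holds it */
static __thread int held[NUM_MUTEX];

/*
 * Returns 0 if acquiring mutex_id respects the ordering rule:
 * no mutex with an equal or higher ID may already be held.
 */
static int order_check(int mutex_id)
{
	int i;

	for (i = mutex_id; i < NUM_MUTEX; i++) {
		if (!held[i])
			continue;

		if (i == mutex_id)
			fprintf(stderr, "mutex %d already acquired\n", i);
		else
			fprintf(stderr,
				"acquiring %d while holding %d violates ordering\n",
				mutex_id, i);
		return -1;
	}

	held[mutex_id] = 1;	/* caller proceeds to take the real lock */
	return 0;
}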
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
#ifdef DEFINE_ACPI_GLOBALS
-/* Names for the mutexes used in the subsystem */
+/* Debug names for the mutexes above */
-static char *acpi_gbl_mutex_names[] = {
- "ACPI_MTX_Execute",
+static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
"ACPI_MTX_Interpreter",
- "ACPI_MTX_Parser",
- "ACPI_MTX_Dispatcher",
+ "ACPI_MTX_Method",
"ACPI_MTX_Tables",
- "ACPI_MTX_OpRegions",
"ACPI_MTX_Namespace",
"ACPI_MTX_Events",
- "ACPI_MTX_Hardware",
"ACPI_MTX_Caches",
"ACPI_MTX_Memory",
"ACPI_MTX_DebugCmdComplete",
- "ACPI_MTX_DebugCmdReady",
+ "ACPI_MTX_DebugCmdReady"
};
#endif
#endif
+/*
+ * Predefined handles for spinlocks used within the subsystem.
+ * These spinlocks are created by acpi_ut_mutex_initialize
+ */
+#define ACPI_LOCK_GPES 0
+#define ACPI_LOCK_HARDWARE 1
+
+#define ACPI_MAX_LOCK 1
+#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
+
/* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id;