Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / acpi / acpica / dsmethod.c
1 /******************************************************************************
2 *
3 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
4 *
5 *****************************************************************************/
6
7 /*
8 * Copyright (C) 2000 - 2013, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44 #include <acpi/acpi.h>
45 #include "accommon.h"
46 #include "acdispat.h"
47 #include "acinterp.h"
48 #include "acnamesp.h"
49 #ifdef ACPI_DISASSEMBLER
50 #include "acdisasm.h"
51 #endif
52
53 #define _COMPONENT ACPI_DISPATCHER
54 ACPI_MODULE_NAME("dsmethod")
55
56 /* Local prototypes */
57 static acpi_status
58 acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
59
60 /*******************************************************************************
61 *
62 * FUNCTION: acpi_ds_method_error
63 *
64 * PARAMETERS: status - Execution status
65 * walk_state - Current state
66 *
67 * RETURN: Status
68 *
69 * DESCRIPTION: Called on method error. Invoke the global exception handler if
70 * present, dump the method data if the disassembler is configured
71 *
72 * Note: Allows the exception handler to change the status code
73 *
74 ******************************************************************************/
75
76 acpi_status
77 acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
78 {
79 ACPI_FUNCTION_ENTRY();
80
81 /* Ignore AE_OK and control exception codes */
82
83 if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
84 return (status);
85 }
86
87 /* Invoke the global exception handler */
88
89 if (acpi_gbl_exception_handler) {
90
91 /* Exit the interpreter, allow handler to execute methods */
92
93 acpi_ex_exit_interpreter();
94
95 /*
96 * Handler can map the exception code to anything it wants, including
97 * AE_OK, in which case the executing method will not be aborted.
98 */
99 status = acpi_gbl_exception_handler(status,
100 walk_state->method_node ?
101 walk_state->method_node->
102 name.integer : 0,
103 walk_state->opcode,
104 walk_state->aml_offset,
105 NULL);
106 acpi_ex_enter_interpreter();
107 }
108
109 acpi_ds_clear_implicit_return(walk_state);
110
111 #ifdef ACPI_DISASSEMBLER
112 if (ACPI_FAILURE(status)) {
113
114 /* Display method locals/args if disassembler is present */
115
116 acpi_dm_dump_method_info(status, walk_state, walk_state->op);
117 }
118 #endif
119
120 return (status);
121 }
122
123 /*******************************************************************************
124 *
125 * FUNCTION: acpi_ds_create_method_mutex
126 *
127 * PARAMETERS: obj_desc - The method object
128 *
129 * RETURN: Status
130 *
131 * DESCRIPTION: Create a mutex object for a serialized control method
132 *
133 ******************************************************************************/
134
135 static acpi_status
136 acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
137 {
138 union acpi_operand_object *mutex_desc;
139 acpi_status status;
140
141 ACPI_FUNCTION_TRACE(ds_create_method_mutex);
142
143 /* Create the new mutex object */
144
145 mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
146 if (!mutex_desc) {
147 return_ACPI_STATUS(AE_NO_MEMORY);
148 }
149
150 /* Create the actual OS Mutex */
151
152 status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
153 if (ACPI_FAILURE(status)) {
154 acpi_ut_delete_object_desc(mutex_desc);
155 return_ACPI_STATUS(status);
156 }
157
158 mutex_desc->mutex.sync_level = method_desc->method.sync_level;
159 method_desc->method.mutex = mutex_desc;
160 return_ACPI_STATUS(AE_OK);
161 }
162
163 /*******************************************************************************
164 *
165 * FUNCTION: acpi_ds_begin_method_execution
166 *
167 * PARAMETERS: method_node - Node of the method
168 * obj_desc - The method object
169 * walk_state - current state, NULL if not yet executing
170 * a method.
171 *
172 * RETURN: Status
173 *
174 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
175 * increments the thread count, and waits at the method semaphore
176 * for clearance to execute.
177 *
178 ******************************************************************************/
179
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of thread count (it is stored in a u8 field) */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call (i.e. the mutex is already held by this thread,
		 * detected via the thread_id recorded in the mutex object).
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				/*
				 * Record the previous sync level so it can be restored
				 * when the mutex is finally released, mark this thread
				 * as the owner, and raise the thread to the method's
				 * sync level.
				 */
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;
				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				/* No walk state (top-level invocation) */
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth (handles recursive calls) */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	return_ACPI_STATUS(status);

      cleanup:
	/*
	 * On error, must release the method mutex (if present).
	 *
	 * NOTE(review): this path releases the OS mutex but does not roll
	 * back acquisition_depth/thread_id/current_sync_level updated above —
	 * confirm against upstream ACPICA before changing.
	 */
	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}
305
306 /*******************************************************************************
307 *
308 * FUNCTION: acpi_ds_call_control_method
309 *
310 * PARAMETERS: thread - Info for this thread
311 * this_walk_state - Current walk state
312 * op - Current Op to be walked
313 *
314 * RETURN: Status
315 *
316 * DESCRIPTION: Transfer execution to a called control method
317 *
318 ******************************************************************************/
319
acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* The method object is attached to the namespace node */

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
						    NULL, obj_desc, thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];

	/* Initialize the new walk state for execution of the method AML */

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	/*
	 * Invoke an internal method if necessary. Internal methods are
	 * implemented in C (dispatch.implementation) rather than in AML;
	 * AE_OK from the implementation is mapped to AE_CTRL_TERMINATE so
	 * the dispatcher terminates the method normally.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

      cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	if (next_walk_state) {
		acpi_ds_delete_walk_state(next_walk_state);
	}

	return_ACPI_STATUS(status);
}
438
439 /*******************************************************************************
440 *
441 * FUNCTION: acpi_ds_restart_control_method
442 *
443 * PARAMETERS: walk_state - State for preempted method (caller)
444 * return_desc - Return value from the called method
445 *
446 * RETURN: Status
447 *
448 * DESCRIPTION: Restart a method that was preempted by another (nested) method
449 * invocation. Handle the return value (if any) from the callee.
450 *
451 ******************************************************************************/
452
453 acpi_status
454 acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
455 union acpi_operand_object *return_desc)
456 {
457 acpi_status status;
458 int same_as_implicit_return;
459
460 ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
461
462 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
463 "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
464 acpi_ut_get_node_name(walk_state->method_node),
465 walk_state->method_call_op, return_desc));
466
467 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
468 " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
469 walk_state->return_used,
470 walk_state->results, walk_state));
471
472 /* Did the called method return a value? */
473
474 if (return_desc) {
475
476 /* Is the implicit return object the same as the return desc? */
477
478 same_as_implicit_return =
479 (walk_state->implicit_return_obj == return_desc);
480
481 /* Are we actually going to use the return value? */
482
483 if (walk_state->return_used) {
484
485 /* Save the return value from the previous method */
486
487 status = acpi_ds_result_push(return_desc, walk_state);
488 if (ACPI_FAILURE(status)) {
489 acpi_ut_remove_reference(return_desc);
490 return_ACPI_STATUS(status);
491 }
492
493 /*
494 * Save as THIS method's return value in case it is returned
495 * immediately to yet another method
496 */
497 walk_state->return_desc = return_desc;
498 }
499
500 /*
501 * The following code is the optional support for the so-called
502 * "implicit return". Some AML code assumes that the last value of the
503 * method is "implicitly" returned to the caller, in the absence of an
504 * explicit return value.
505 *
506 * Just save the last result of the method as the return value.
507 *
508 * NOTE: this is optional because the ASL language does not actually
509 * support this behavior.
510 */
511 else if (!acpi_ds_do_implicit_return
512 (return_desc, walk_state, FALSE)
513 || same_as_implicit_return) {
514 /*
515 * Delete the return value if it will not be used by the
516 * calling method or remove one reference if the explicit return
517 * is the same as the implicit return value.
518 */
519 acpi_ut_remove_reference(return_desc);
520 }
521 }
522
523 return_ACPI_STATUS(AE_OK);
524 }
525
526 /*******************************************************************************
527 *
528 * FUNCTION: acpi_ds_terminate_control_method
529 *
530 * PARAMETERS: method_desc - Method object
531 * walk_state - State associated with the method
532 *
533 * RETURN: None
534 *
535 * DESCRIPTION: Terminate a control method. Delete everything that the method
536 * created, delete all locals and arguments, and delete the parse
537 * tree if requested.
538 *
539 * MUTEX: Interpreter is locked
540 *
541 ******************************************************************************/
542
void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				/*
				 * Outermost exit for this thread: restore the
				 * caller's sync level, release the OS mutex, and
				 * clear ownership (thread_id == 0 means unowned).
				 */
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 * case we want make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 * will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO((AE_INFO,
					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Workaround this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;
			method_desc->method.info_flags |=
			    ACPI_METHOD_SERIALIZED;
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		if (!
		    (method_desc->method.
		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	return_VOID;
}