/******************************************************************************
 *
 * Module Name: psparse - Parser top level AML parse routines
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

/*
 * Parse the AML and build an operation tree as most interpreters,
 * like Perl, do. Parsing is done by hand rather than with a YACC
 * generated parser to tightly constrain stack and dynamic memory
 * usage. At the same time, parsing is kept flexible and the code
 * fairly compact by parsing based on a list of AML opcode
 * templates in aml_op_info[]
 */
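
/*
 * For example, each opcode encountered in the AML stream is looked up with
 * acpi_ps_get_opcode_info(); the returned template describes the opcode's
 * class and expected arguments, which is what allows a single generic parse
 * loop to handle every opcode (see the use of parent_info->class below).
 */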

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
#include "acinterp.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("psparse")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_get_opcode_size
 *
 * PARAMETERS:  opcode          - An AML opcode
 *
 * RETURN:      Size of the opcode, in bytes (1 or 2)
 *
 * DESCRIPTION: Get the size of the current opcode.
 *
 ******************************************************************************/
u32 acpi_ps_get_opcode_size(u32 opcode)
{

        /* Extended (2-byte) opcode if > 255 */

        if (opcode > 0x00FF) {
                return (2);
        }

        /* Otherwise, just a single byte opcode */

        return (1);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_peek_opcode
 *
 * PARAMETERS:  parser_state        - A parser state object
 *
 * RETURN:      Next AML opcode
 *
 * DESCRIPTION: Get next AML opcode (without incrementing AML pointer)
 *
 ******************************************************************************/

u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
{
        u8 *aml;
        u16 opcode;

        aml = parser_state->aml;
        opcode = (u16) ACPI_GET8(aml);

        if (opcode == AML_EXTENDED_OP_PREFIX) {

                /* Extended opcode, get the second opcode byte */

                aml++;
                opcode = (u16) ((opcode << 8) | ACPI_GET8(aml));
        }

        return (opcode);
}
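
/*
 * Encoding example: extended opcodes are prefixed with AML_EXTENDED_OP_PREFIX
 * (0x5B), so a Mutex opcode appears in the AML stream as the byte pair
 * 0x5B 0x01 and is returned by acpi_ps_peek_opcode() as the 16-bit value
 * 0x5B01, for which acpi_ps_get_opcode_size() reports 2. A one-byte opcode
 * such as Zero (0x00) is returned unchanged with a size of 1.
 */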

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_complete_this_op
 *
 * PARAMETERS:  walk_state      - Current State
 *              op              - Op to complete
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform any cleanup at the completion of an Op.
 *
 ******************************************************************************/

acpi_status
acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
                         union acpi_parse_object * op)
{
        union acpi_parse_object *prev;
        union acpi_parse_object *next;
        const struct acpi_opcode_info *parent_info;
        union acpi_parse_object *replacement_op = NULL;
        acpi_status status = AE_OK;

        ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);

        /* Check for null Op, can happen if AML code is corrupt */

        if (!op) {
                return_ACPI_STATUS(AE_OK);      /* OK for now */
        }

        /* Delete this op and the subtree below it if asked to */

        if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
             ACPI_PARSE_DELETE_TREE)
            || (walk_state->op_info->class == AML_CLASS_ARGUMENT)) {
                return_ACPI_STATUS(AE_OK);
        }

        /* Make sure that we only delete this subtree */

        if (op->common.parent) {
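                /* Start at the head of the parent's argument (sibling) list */
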
                prev = op->common.parent->common.value.arg;
                if (!prev) {

                        /* Nothing more to do */

                        goto cleanup;
                }

                /*
                 * Check if we need to replace the operator and its subtree
                 * with a return value op (placeholder op)
                 */
                parent_info =
                    acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);

                switch (parent_info->class) {
                case AML_CLASS_CONTROL:
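
                        /*
                         * Control opcodes (If/Else/While, etc.): there is
                         * nothing to replace; the subtree is simply unlinked
                         * and deleted below.
                         */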
                        break;

                case AML_CLASS_CREATE:

                        /*
                         * These opcodes contain term_arg operands. The current
                         * op must be replaced by a placeholder return op
                         */
                        replacement_op = acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
                        if (!replacement_op) {
                                status = AE_NO_MEMORY;
                        }
                        break;

                case AML_CLASS_NAMED_OBJECT:

                        /*
                         * These opcodes contain term_arg operands. The current
                         * op must be replaced by a placeholder return op
                         */
                        if ((op->common.parent->common.aml_opcode == AML_REGION_OP)
                            || (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP)
                            || (op->common.parent->common.aml_opcode == AML_BUFFER_OP)
                            || (op->common.parent->common.aml_opcode == AML_PACKAGE_OP)
                            || (op->common.parent->common.aml_opcode == AML_BANK_FIELD_OP)
                            || (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
                                replacement_op = acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
                                if (!replacement_op) {
                                        status = AE_NO_MEMORY;
                                }
                        } else if ((op->common.parent->common.aml_opcode == AML_NAME_OP)
                                   && (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2)) {
                                if ((op->common.aml_opcode == AML_BUFFER_OP)
                                    || (op->common.aml_opcode == AML_PACKAGE_OP)
                                    || (op->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
                                        replacement_op =
                                            acpi_ps_alloc_op(op->common.aml_opcode);
                                        if (!replacement_op) {
                                                status = AE_NO_MEMORY;
                                        } else {
                                                replacement_op->named.data = op->named.data;
                                                replacement_op->named.length = op->named.length;
                                        }
                                }
                        }
                        break;

                default:

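                        /*
                         * All other parent opcode classes: replace the
                         * completed op with a return value placeholder.
                         */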
                        replacement_op = acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
                        if (!replacement_op) {
                                status = AE_NO_MEMORY;
                        }
                }

                /* We must unlink this op from the parent tree */

                if (prev == op) {

                        /* This op is the first in the list */

                        if (replacement_op) {
                                replacement_op->common.parent = op->common.parent;
                                replacement_op->common.value.arg = NULL;
                                replacement_op->common.node = op->common.node;
                                op->common.parent->common.value.arg = replacement_op;
                                replacement_op->common.next = op->common.next;
                        } else {
                                op->common.parent->common.value.arg = op->common.next;
                        }
                }

                /* Search the parent list */

                else
                        while (prev) {

                                /* Traverse all siblings in the parent's argument list */

                                next = prev->common.next;
                                if (next == op) {
                                        if (replacement_op) {
                                                replacement_op->common.parent = op->common.parent;
                                                replacement_op->common.value.arg = NULL;
                                                replacement_op->common.node = op->common.node;
                                                prev->common.next = replacement_op;
                                                replacement_op->common.next = op->common.next;
                                                next = NULL;
                                        } else {
                                                prev->common.next = op->common.next;
                                                next = NULL;
                                        }
                                }
                                prev = next;
                        }
        }

cleanup:

        /* Now we can actually delete the subtree rooted at Op */

        acpi_ps_delete_parse_tree(op);
        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_next_parse_state
 *
 * PARAMETERS:  walk_state          - Current state
 *              op                  - Current parse op
 *              callback_status     - Status from previous operation
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Update the parser state based upon the return exception from
 *              the parser callback.
 *
 ******************************************************************************/

acpi_status
acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
                         union acpi_parse_object *op,
                         acpi_status callback_status)
{
        struct acpi_parse_state *parser_state = &walk_state->parser_state;
        acpi_status status = AE_CTRL_PENDING;

        ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);

        switch (callback_status) {
        case AE_CTRL_TERMINATE:
                /*
                 * A control method was terminated via a RETURN statement.
                 * The walk of this method is complete.
                 */
                parser_state->aml = parser_state->aml_end;
                status = AE_CTRL_TERMINATE;
                break;

        case AE_CTRL_BREAK:

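                /*
                 * A Break was executed. Resume at the innermost enclosing
                 * While and force its predicate to FALSE so that the loop
                 * terminates.
                 */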
                parser_state->aml = walk_state->aml_last_while;
                walk_state->control_state->common.value = FALSE;
                status = AE_CTRL_BREAK;
                break;

        case AE_CTRL_CONTINUE:

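                /*
                 * A Continue was executed. Resume at the innermost enclosing
                 * While so that its predicate is re-evaluated.
                 */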
                parser_state->aml = walk_state->aml_last_while;
                status = AE_CTRL_CONTINUE;
                break;

        case AE_CTRL_PENDING:

                parser_state->aml = walk_state->aml_last_while;
                break;

#if 0
        case AE_CTRL_SKIP:

                parser_state->aml = parser_state->scope->parse_scope.pkg_end;
                status = AE_OK;
                break;
#endif

        case AE_CTRL_TRUE:
                /*
                 * Predicate of an IF was true, and we are at the matching ELSE.
                 * Just close out this package
                 */
                parser_state->aml = acpi_ps_get_next_package_end(parser_state);
                status = AE_CTRL_PENDING;
                break;

        case AE_CTRL_FALSE:
                /*
                 * Either an IF/WHILE Predicate was false or we encountered a BREAK
                 * opcode. In both cases, we do not execute the rest of the
                 * package; we simply close out the parent (finishing the walk of
                 * this branch of the tree) and continue execution at the parent
                 * level.
                 */
                parser_state->aml = parser_state->scope->parse_scope.pkg_end;

                /* In the case of a BREAK, just force a predicate (if any) to FALSE */

                walk_state->control_state->common.value = FALSE;
                status = AE_CTRL_END;
                break;

        case AE_CTRL_TRANSFER:

                /* A method call (invocation) -- transfer control */

                status = AE_CTRL_TRANSFER;
                walk_state->prev_op = op;
                walk_state->method_call_op = op;
                walk_state->method_call_node = (op->common.value.arg)->common.node;

                /* Will return value (if any) be used by the caller? */

                walk_state->return_used = acpi_ds_is_result_used(op, walk_state);
                break;

        default:

                status = callback_status;
                if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
                        status = AE_OK;
                }
                break;
        }

        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_parse_aml
 *
 * PARAMETERS:  walk_state          - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse raw AML and return a tree of ops
 *
 ******************************************************************************/

acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
{
        acpi_status status;
        struct acpi_thread_state *thread;
        struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
        struct acpi_walk_state *previous_walk_state;

        ACPI_FUNCTION_TRACE(ps_parse_aml);

        ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
                          "Entered with WalkState=%p Aml=%p size=%X\n",
                          walk_state, walk_state->parser_state.aml,
                          walk_state->parser_state.aml_size));

        if (!walk_state->parser_state.aml) {
                return_ACPI_STATUS(AE_NULL_OBJECT);
        }

        /* Create and initialize a new thread state */

        thread = acpi_ut_create_thread_state();
        if (!thread) {
                if (walk_state->method_desc) {

                        /* Executing a control method - additional cleanup */

                        acpi_ds_terminate_control_method(walk_state->method_desc,
                                                         walk_state);
                }

                acpi_ds_delete_walk_state(walk_state);
                return_ACPI_STATUS(AE_NO_MEMORY);
        }

        walk_state->thread = thread;

        /*
         * If executing a method, the starting sync_level is this method's
         * sync_level
         */
        if (walk_state->method_desc) {
                walk_state->thread->current_sync_level =
                    walk_state->method_desc->method.sync_level;
        }

        acpi_ds_push_walk_state(walk_state, thread);

        /*
         * This global allows the AML debugger to get a handle to the currently
         * executing control method.
         */
        acpi_gbl_current_walk_list = thread;

        /*
         * Execute the walk loop as long as there is a valid Walk State. This
         * handles nested control method invocations without recursion.
         */
        ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));

        status = AE_OK;
        while (walk_state) {
                if (ACPI_SUCCESS(status)) {
                        /*
                         * The parse_loop executes AML until the method terminates
                         * or calls another method.
                         */
                        status = acpi_ps_parse_loop(walk_state);
                }

                ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
                                  "Completed one call to walk loop, %s State=%p\n",
                                  acpi_format_exception(status), walk_state));

                if (status == AE_CTRL_TRANSFER) {
                        /*
                         * A method call was detected.
                         * Transfer control to the called control method
                         */
                        status = acpi_ds_call_control_method(thread, walk_state, NULL);
                        if (ACPI_FAILURE(status)) {
                                status = acpi_ds_method_error(status, walk_state);
                        }

                        /*
                         * If the transfer to the new method call worked, a new
                         * walk state was created -- get it
                         */
                        walk_state = acpi_ds_get_current_walk_state(thread);
                        continue;
                } else if (status == AE_CTRL_TERMINATE) {
                        status = AE_OK;
                } else if ((status != AE_OK) && (walk_state->method_desc)) {

                        /* Either the method parse or actual execution failed */

                        ACPI_ERROR_METHOD("Method parse/execution failed",
                                          walk_state->method_node, NULL, status);

                        /* Check for possible multi-thread reentrancy problem */

                        if ((status == AE_ALREADY_EXISTS) &&
                            (!(walk_state->method_desc->method.info_flags &
                               ACPI_METHOD_SERIALIZED))) {
                                /*
                                 * Method is not serialized and tried to create an object
                                 * twice. The probable cause is that the method cannot
                                 * handle reentrancy. Mark as "pending serialized" now, and
                                 * then mark "serialized" when the last thread exits.
                                 */
                                walk_state->method_desc->method.info_flags |=
                                    ACPI_METHOD_SERIALIZED_PENDING;
                        }
                }

                /* We are done with this walk, move on to the parent if any */

                walk_state = acpi_ds_pop_walk_state(thread);

                /* Reset the current scope to the beginning of scope stack */

                acpi_ds_scope_stack_clear(walk_state);

                /*
                 * If we just returned from the execution of a control method or if we
                 * encountered an error during the method parse phase, there's lots of
                 * cleanup to do
                 */
                if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
                     ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) {
                        acpi_ds_terminate_control_method(walk_state->method_desc,
                                                         walk_state);
                }

                /* Delete this walk state and all linked control states */

                acpi_ps_cleanup_scope(&walk_state->parser_state);
                previous_walk_state = walk_state;

                ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
                                  "ReturnValue=%p, ImplicitValue=%p State=%p\n",
                                  walk_state->return_desc,
                                  walk_state->implicit_return_obj, walk_state));

                /* Check if we have restarted a preempted walk */

                walk_state = acpi_ds_get_current_walk_state(thread);
                if (walk_state) {
                        if (ACPI_SUCCESS(status)) {
                                /*
                                 * There is another walk state, restart it.
                                 * If the method return value is not used by the parent,
                                 * the object is deleted.
                                 */
                                if (!previous_walk_state->return_desc) {
                                        /*
                                         * In slack mode execution, if there is no return value
                                         * we should implicitly return zero (0) as a default value.
                                         */
                                        if (acpi_gbl_enable_interpreter_slack &&
                                            !previous_walk_state->implicit_return_obj) {
                                                previous_walk_state->implicit_return_obj =
                                                    acpi_ut_create_integer_object((u64) 0);
                                                if (!previous_walk_state->implicit_return_obj) {
                                                        return_ACPI_STATUS(AE_NO_MEMORY);
                                                }
                                        }

                                        /* Restart the calling control method */

                                        status = acpi_ds_restart_control_method(walk_state,
                                                        previous_walk_state->implicit_return_obj);
                                } else {
                                        /*
                                         * We have a valid return value, delete any implicit
                                         * return value.
                                         */
                                        acpi_ds_clear_implicit_return(previous_walk_state);

                                        status = acpi_ds_restart_control_method(walk_state,
                                                        previous_walk_state->return_desc);
                                }
                                if (ACPI_SUCCESS(status)) {
                                        walk_state->walk_type |= ACPI_WALK_METHOD_RESTART;
                                }
                        } else {
                                /* On error, delete any return object or implicit return */

                                acpi_ut_remove_reference(previous_walk_state->return_desc);
                                acpi_ds_clear_implicit_return(previous_walk_state);
                        }
                }

                /*
                 * Just completed a 1st-level method, save the final internal return
                 * value (if any)
                 */
                else if (previous_walk_state->caller_return_desc) {
                        if (previous_walk_state->implicit_return_obj) {
                                *(previous_walk_state->caller_return_desc) =
                                    previous_walk_state->implicit_return_obj;
                        } else {
                                /* NULL if no return value */

                                *(previous_walk_state->caller_return_desc) =
                                    previous_walk_state->return_desc;
                        }
                } else {
                        if (previous_walk_state->return_desc) {

                                /* Caller doesn't want it, must delete it */

                                acpi_ut_remove_reference(previous_walk_state->return_desc);
                        }
                        if (previous_walk_state->implicit_return_obj) {

                                /* Caller doesn't want it, must delete it */

                                acpi_ut_remove_reference(previous_walk_state->implicit_return_obj);
                        }
                }

                acpi_ds_delete_walk_state(previous_walk_state);
        }

        /* Normal exit */

        acpi_ex_release_all_mutexes(thread);
        acpi_ut_delete_generic_state(ACPI_CAST_PTR(union acpi_generic_state, thread));
        acpi_gbl_current_walk_list = prev_walk_list;
        return_ACPI_STATUS(status);
}