/******************************************************************************
 *
 * Module Name: psparse - Parser top level AML parse routines
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2018, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

/*
 * Parse the AML and build an operation tree as most interpreters,
 * like Perl, do. Parsing is done by hand rather than with a YACC
 * generated parser to tightly constrain stack and dynamic memory
 * usage. At the same time, parsing is kept flexible and the code
 * fairly compact by parsing based on a list of AML opcode
 * templates in aml_op_info[]
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
#include "acinterp.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("psparse")

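/*
 * Illustrative sketch (not part of the original ACPICA source): the module
 * comment above describes a table-driven parse, where each opcode maps to a
 * small template describing its arguments. The disabled, hypothetical
 * example_op_table[]/example_find_template() below show that general idea in
 * miniature; the real data lives in aml_op_info[] and is reached through
 * acpi_ps_get_opcode_info().
 */
#if 0
struct example_op_template {
	u16 opcode;		/* AML opcode value */
	u8 arg_count;		/* Number of fixed arguments */
	const char *name;	/* Name used for tracing/debug output */
};

static const struct example_op_template example_op_table[] = {
	{0x10, 1, "Scope"},	/* Entries are illustrative only */
	{0x14, 2, "Method"},
	{0xA4, 1, "Return"},
};

static const struct example_op_template *example_find_template(u16 opcode)
{
	u32 i;

	for (i = 0;
	     i < (sizeof(example_op_table) / sizeof(example_op_table[0]));
	     i++) {
		if (example_op_table[i].opcode == opcode) {
			return (&example_op_table[i]);
		}
	}

	return (NULL);		/* Unknown opcode */
}
#endif
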
/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_get_opcode_size
 *
 * PARAMETERS:  opcode          - An AML opcode
 *
 * RETURN:      Size of the opcode, in bytes (1 or 2)
 *
 * DESCRIPTION: Get the size of the current opcode.
 *
 ******************************************************************************/
u32 acpi_ps_get_opcode_size(u32 opcode)
{

	/* Extended (2-byte) opcode if > 255 */

	if (opcode > 0x00FF) {
		return (2);
	}

	/* Otherwise, just a single byte opcode */

	return (1);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_peek_opcode
 *
 * PARAMETERS:  parser_state    - A parser state object
 *
 * RETURN:      Next AML opcode
 *
 * DESCRIPTION: Get next AML opcode (without incrementing AML pointer)
 *
 ******************************************************************************/

u16 acpi_ps_peek_opcode(struct acpi_parse_state *parser_state)
{
	u8 *aml;
	u16 opcode;

	aml = parser_state->aml;
	opcode = (u16) ACPI_GET8(aml);

	if (opcode == AML_EXTENDED_PREFIX) {

		/* Extended opcode, get the second opcode byte */

		aml++;
		opcode = (u16) ((opcode << 8) | ACPI_GET8(aml));
	}

	return (opcode);
}
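
/*
 * Illustrative sketch (not part of the original ACPICA source): a typical
 * way the two helpers above combine is "peek, then advance" -- read the
 * opcode without moving the AML pointer, then consume 1 or 2 bytes once the
 * opcode has been accepted. The disabled helper below is hypothetical and
 * assumes only the parser_state->aml field used above.
 */
#if 0
static u16 example_get_and_consume_opcode(struct acpi_parse_state *parser_state)
{
	u16 opcode;

	/* Look at the next opcode without consuming it */

	opcode = acpi_ps_peek_opcode(parser_state);

	/* Advance the AML pointer past the opcode (1 or 2 bytes) */

	parser_state->aml += acpi_ps_get_opcode_size(opcode);
	return (opcode);
}
#endif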

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_complete_this_op
 *
 * PARAMETERS:  walk_state      - Current State
 *              op              - Op to complete
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform any cleanup at the completion of an Op.
 *
 ******************************************************************************/

acpi_status
acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
			 union acpi_parse_object *op)
{
	union acpi_parse_object *prev;
	union acpi_parse_object *next;
	const struct acpi_opcode_info *parent_info;
	union acpi_parse_object *replacement_op = NULL;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);

	/* Check for null Op, can happen if AML code is corrupt */

	if (!op) {
		return_ACPI_STATUS(AE_OK);	/* OK for now */
	}

	acpi_ex_stop_trace_opcode(op, walk_state);

	/* Delete this op and the subtree below it if asked to */

	if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
	     ACPI_PARSE_DELETE_TREE)
	    || (walk_state->op_info->class == AML_CLASS_ARGUMENT)) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Make sure that we only delete this subtree */

	if (op->common.parent) {
		prev = op->common.parent->common.value.arg;
		if (!prev) {

			/* Nothing more to do */

			goto cleanup;
		}

		/*
		 * Check if we need to replace the operator and its subtree
		 * with a return value op (placeholder op)
		 */
		parent_info =
		    acpi_ps_get_opcode_info(op->common.parent->common.
					    aml_opcode);

		switch (parent_info->class) {
		case AML_CLASS_CONTROL:

			break;

		case AML_CLASS_CREATE:
			/*
			 * These opcodes contain term_arg operands. The current
			 * op must be replaced by a placeholder return op
			 */
			replacement_op =
			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
					     op->common.aml);
			if (!replacement_op) {
				status = AE_NO_MEMORY;
			}
			break;

		case AML_CLASS_NAMED_OBJECT:
			/*
			 * These opcodes contain term_arg operands. The current
			 * op must be replaced by a placeholder return op
			 */
			if ((op->common.parent->common.aml_opcode ==
			     AML_REGION_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_DATA_REGION_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_BUFFER_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_PACKAGE_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_BANK_FIELD_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_VARIABLE_PACKAGE_OP)) {
				replacement_op =
				    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
						     op->common.aml);
				if (!replacement_op) {
					status = AE_NO_MEMORY;
				}
			} else
			    if ((op->common.parent->common.aml_opcode ==
				 AML_NAME_OP)
				&& (walk_state->pass_number <=
				    ACPI_IMODE_LOAD_PASS2)) {
				if ((op->common.aml_opcode == AML_BUFFER_OP)
				    || (op->common.aml_opcode == AML_PACKAGE_OP)
				    || (op->common.aml_opcode ==
					AML_VARIABLE_PACKAGE_OP)) {
					replacement_op =
					    acpi_ps_alloc_op(op->common.
							     aml_opcode,
							     op->common.aml);
					if (!replacement_op) {
						status = AE_NO_MEMORY;
					} else {
						replacement_op->named.data =
						    op->named.data;
						replacement_op->named.length =
						    op->named.length;
					}
				}
			}
			break;

		default:

			replacement_op =
			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
					     op->common.aml);
			if (!replacement_op) {
				status = AE_NO_MEMORY;
			}
		}

		/* We must unlink this op from the parent tree */

		if (prev == op) {

			/* This op is the first in the list */

			if (replacement_op) {
				replacement_op->common.parent =
				    op->common.parent;
				replacement_op->common.value.arg = NULL;
				replacement_op->common.node = op->common.node;
				op->common.parent->common.value.arg =
				    replacement_op;
				replacement_op->common.next = op->common.next;
			} else {
				op->common.parent->common.value.arg =
				    op->common.next;
			}
		}

		/* Search the parent list */

		else
			while (prev) {

				/* Traverse all siblings in the parent's argument list */

				next = prev->common.next;
				if (next == op) {
					if (replacement_op) {
						replacement_op->common.parent =
						    op->common.parent;
						replacement_op->common.value.
						    arg = NULL;
						replacement_op->common.node =
						    op->common.node;
						prev->common.next =
						    replacement_op;
						replacement_op->common.next =
						    op->common.next;
						next = NULL;
					} else {
						prev->common.next =
						    op->common.next;
						next = NULL;
					}
				}
				prev = next;
			}
	}

cleanup:

	/* Now we can actually delete the subtree rooted at Op */

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}
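
/*
 * Illustrative sketch (not part of the original ACPICA source): the unlink
 * code above is standard singly-linked-list surgery on the parent's argument
 * list (first child via common.value.arg, siblings via common.next), with an
 * optional placeholder spliced in. Stripped of the ACPICA types, the same
 * pattern looks like the disabled, hypothetical example below.
 */
#if 0
struct example_node {
	struct example_node *next;		/* Next sibling */
	struct example_node *first_child;	/* Head of the argument list */
};

static void example_unlink_child(struct example_node *parent,
				 struct example_node *child,
				 struct example_node *replacement)
{
	struct example_node **link = &parent->first_child;

	/* Walk the sibling links until we find the one that points at child */

	while (*link && (*link != child)) {
		link = &(*link)->next;
	}

	if (!*link) {
		return;		/* Child not found under this parent */
	}

	if (replacement) {

		/* Splice the placeholder in where the child used to be */

		replacement->next = child->next;
		*link = replacement;
	} else {
		/* No placeholder -- simply drop the child from the list */

		*link = child->next;
	}
}
#endif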

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_next_parse_state
 *
 * PARAMETERS:  walk_state          - Current state
 *              op                  - Current parse op
 *              callback_status     - Status from previous operation
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Update the parser state based upon the return exception from
 *              the parser callback.
 *
 ******************************************************************************/

acpi_status
acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
			 union acpi_parse_object *op,
			 acpi_status callback_status)
{
	struct acpi_parse_state *parser_state = &walk_state->parser_state;
	acpi_status status = AE_CTRL_PENDING;

	ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);

	switch (callback_status) {
	case AE_CTRL_TERMINATE:
		/*
		 * A control method was terminated via a RETURN statement.
		 * The walk of this method is complete.
		 */
		parser_state->aml = parser_state->aml_end;
		status = AE_CTRL_TERMINATE;
		break;

	case AE_CTRL_BREAK:

		parser_state->aml = walk_state->aml_last_while;
		walk_state->control_state->common.value = FALSE;
		status = AE_CTRL_BREAK;
		break;

	case AE_CTRL_CONTINUE:

		parser_state->aml = walk_state->aml_last_while;
		status = AE_CTRL_CONTINUE;
		break;

	case AE_CTRL_PENDING:

		parser_state->aml = walk_state->aml_last_while;
		break;

#if 0
	case AE_CTRL_SKIP:

		parser_state->aml = parser_state->scope->parse_scope.pkg_end;
		status = AE_OK;
		break;
#endif

	case AE_CTRL_TRUE:
		/*
		 * Predicate of an IF was true, and we are at the matching ELSE.
		 * Just close out this package
		 */
		parser_state->aml = acpi_ps_get_next_package_end(parser_state);
		status = AE_CTRL_PENDING;
		break;

	case AE_CTRL_FALSE:
		/*
		 * Either an IF/WHILE Predicate was false or we encountered a
		 * BREAK opcode. In both cases, we do not execute the rest of
		 * the package; we simply close out the parent (finishing the
		 * walk of this branch of the tree) and continue execution at
		 * the parent level.
		 */
		parser_state->aml = parser_state->scope->parse_scope.pkg_end;

		/* In the case of a BREAK, just force a predicate (if any) to FALSE */

		walk_state->control_state->common.value = FALSE;
		status = AE_CTRL_END;
		break;

	case AE_CTRL_TRANSFER:

		/* A method call (invocation) -- transfer control */

		status = AE_CTRL_TRANSFER;
		walk_state->prev_op = op;
		walk_state->method_call_op = op;
		walk_state->method_call_node =
		    (op->common.value.arg)->common.node;

		/* Will return value (if any) be used by the caller? */

		walk_state->return_used =
		    acpi_ds_is_result_used(op, walk_state);
		break;

	default:

		status = callback_status;
		if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
			status = AE_OK;
		}
		break;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_parse_aml
 *
 * PARAMETERS:  walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse raw AML and return a tree of ops
 *
 ******************************************************************************/

acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
{
	acpi_status status;
	struct acpi_thread_state *thread;
	struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
	struct acpi_walk_state *previous_walk_state;

	ACPI_FUNCTION_TRACE(ps_parse_aml);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Entered with WalkState=%p Aml=%p size=%X\n",
			  walk_state, walk_state->parser_state.aml,
			  walk_state->parser_state.aml_size));

	if (!walk_state->parser_state.aml) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Create and initialize a new thread state */

	thread = acpi_ut_create_thread_state();
	if (!thread) {
		if (walk_state->method_desc) {

			/* Executing a control method - additional cleanup */

			acpi_ds_terminate_control_method(walk_state->
							 method_desc,
							 walk_state);
		}

		acpi_ds_delete_walk_state(walk_state);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	walk_state->thread = thread;

	/*
	 * If executing a method, the starting sync_level is this method's
	 * sync_level
	 */
	if (walk_state->method_desc) {
		walk_state->thread->current_sync_level =
		    walk_state->method_desc->method.sync_level;
	}

	acpi_ds_push_walk_state(walk_state, thread);

	/*
	 * This global allows the AML debugger to get a handle to the currently
	 * executing control method.
	 */
	acpi_gbl_current_walk_list = thread;

	/*
	 * Execute the walk loop as long as there is a valid Walk State. This
	 * handles nested control method invocations without recursion.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));

	status = AE_OK;
	while (walk_state) {
		if (ACPI_SUCCESS(status)) {
			/*
			 * The parse_loop executes AML until the method terminates
			 * or calls another method.
			 */
			status = acpi_ps_parse_loop(walk_state);
		}

		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
				  "Completed one call to walk loop, %s State=%p\n",
				  acpi_format_exception(status), walk_state));

		if (status == AE_CTRL_TRANSFER) {
			/*
			 * A method call was detected.
			 * Transfer control to the called control method
			 */
			status =
			    acpi_ds_call_control_method(thread, walk_state,
							NULL);
			if (ACPI_FAILURE(status)) {
				status =
				    acpi_ds_method_error(status, walk_state);
			}

			/*
			 * If the transfer to the new method call worked,
			 * a new walk state was created -- get it
			 */
			walk_state = acpi_ds_get_current_walk_state(thread);
			continue;
		} else if (status == AE_CTRL_TERMINATE) {
			status = AE_OK;
		} else if ((status != AE_OK) && (walk_state->method_desc)) {

			/* Either the method parse or actual execution failed */

			acpi_ex_exit_interpreter();
			if (status == AE_ABORT_METHOD) {
				acpi_ns_print_node_pathname(walk_state->
							    method_node,
							    "Method aborted:");
				acpi_os_printf("\n");
			} else {
				ACPI_ERROR_METHOD
				    ("Method parse/execution failed",
				     walk_state->method_node, NULL, status);
			}
			acpi_ex_enter_interpreter();

			/* Check for possible multi-thread reentrancy problem */

			if ((status == AE_ALREADY_EXISTS) &&
			    (!(walk_state->method_desc->method.info_flags &
			       ACPI_METHOD_SERIALIZED))) {
				/*
				 * Method is not serialized and tried to create an object
				 * twice. The probable cause is that the method cannot
				 * handle reentrancy. Mark as "pending serialized" now, and
				 * then mark "serialized" when the last thread exits.
				 */
				walk_state->method_desc->method.info_flags |=
				    ACPI_METHOD_SERIALIZED_PENDING;
			}
		}

		/* We are done with this walk, move on to the parent if any */

		walk_state = acpi_ds_pop_walk_state(thread);

		/* Reset the current scope to the beginning of scope stack */

		acpi_ds_scope_stack_clear(walk_state);

		/*
		 * If we just returned from the execution of a control method or if we
		 * encountered an error during the method parse phase, there's lots of
		 * cleanup to do
		 */
		if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
		     ACPI_PARSE_EXECUTE &&
		     !(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) ||
		    (ACPI_FAILURE(status))) {
			acpi_ds_terminate_control_method(walk_state->
							 method_desc,
							 walk_state);
		}

		/* Delete this walk state and all linked control states */

		acpi_ps_cleanup_scope(&walk_state->parser_state);
		previous_walk_state = walk_state;

		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
				  "ReturnValue=%p, ImplicitValue=%p State=%p\n",
				  walk_state->return_desc,
				  walk_state->implicit_return_obj, walk_state));

		/* Check if we have restarted a preempted walk */

		walk_state = acpi_ds_get_current_walk_state(thread);
		if (walk_state) {
			if (ACPI_SUCCESS(status)) {
608 * There is another walk state, restart it.
609 * If the method return value is not used by the parent,
610 * The object is deleted
611 */
				if (!previous_walk_state->return_desc) {
					/*
					 * In slack mode execution, if there is no return value
					 * we should implicitly return zero (0) as a default value.
					 */
					if (acpi_gbl_enable_interpreter_slack &&
					    !previous_walk_state->
					    implicit_return_obj) {
						previous_walk_state->
						    implicit_return_obj =
						    acpi_ut_create_integer_object
						    ((u64) 0);
						if (!previous_walk_state->
						    implicit_return_obj) {
							return_ACPI_STATUS
							    (AE_NO_MEMORY);
						}
					}

					/* Restart the calling control method */

					status =
					    acpi_ds_restart_control_method
					    (walk_state,
					     previous_walk_state->
					     implicit_return_obj);
				} else {
					/*
					 * We have a valid return value, delete any implicit
					 * return value.
					 */
					acpi_ds_clear_implicit_return
					    (previous_walk_state);

					status =
					    acpi_ds_restart_control_method
					    (walk_state,
					     previous_walk_state->return_desc);
				}
				if (ACPI_SUCCESS(status)) {
					walk_state->walk_type |=
					    ACPI_WALK_METHOD_RESTART;
				}
			} else {
				/* On error, delete any return object or implicit return */

				acpi_ut_remove_reference(previous_walk_state->
							 return_desc);
				acpi_ds_clear_implicit_return
				    (previous_walk_state);
			}
		}

		/*
		 * Just completed a 1st-level method, save the final internal return
		 * value (if any)
		 */
		else if (previous_walk_state->caller_return_desc) {
			if (previous_walk_state->implicit_return_obj) {
				*(previous_walk_state->caller_return_desc) =
				    previous_walk_state->implicit_return_obj;
			} else {
				/* NULL if no return value */

				*(previous_walk_state->caller_return_desc) =
				    previous_walk_state->return_desc;
			}
		} else {
			if (previous_walk_state->return_desc) {

				/* Caller doesn't want it, must delete it */

				acpi_ut_remove_reference(previous_walk_state->
							 return_desc);
			}
			if (previous_walk_state->implicit_return_obj) {

				/* Caller doesn't want it, must delete it */

				acpi_ut_remove_reference(previous_walk_state->
							 implicit_return_obj);
			}
		}

		acpi_ds_delete_walk_state(previous_walk_state);
	}

	/* Normal exit */

	acpi_ex_release_all_mutexes(thread);
	acpi_ut_delete_generic_state(ACPI_CAST_PTR
				     (union acpi_generic_state, thread));
	acpi_gbl_current_walk_list = prev_walk_list;
	return_ACPI_STATUS(status);
}
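
/*
 * Illustrative sketch (not part of the original ACPICA source): the
 * while (walk_state) loop above replaces C-stack recursion with an explicit
 * stack of walk states -- a nested method call pushes a new state, and
 * completion pops it so the caller can be restarted. The disabled example
 * below shows only that control shape, using invented example_* names and a
 * fixed-size frame array.
 */
#if 0
#define EXAMPLE_MAX_DEPTH   8

struct example_frame {
	u32 depth;		/* Nesting level of this "method" */
	u32 calls_left;		/* Nested calls still to be made */
};

static void example_run(void)
{
	struct example_frame stack[EXAMPLE_MAX_DEPTH];
	u32 top = 0;

	/* Push the top-level frame; it will make two nested "calls" */

	stack[top].depth = 0;
	stack[top].calls_left = 2;

	while (1) {
		struct example_frame *frame = &stack[top];

		if (frame->calls_left && (top + 1 < EXAMPLE_MAX_DEPTH)) {

			/* "Method call": push a frame for the callee */

			frame->calls_left--;
			top++;
			stack[top].depth = frame->depth + 1;
			stack[top].calls_left = 0;
			continue;
		}

		/* "Method return": pop the frame and resume the caller */

		if (!top) {
			break;	/* The top-level method has completed */
		}
		top--;
	}
}
#endif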