// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: psparse - Parser top level AML parse routines
 *
 * Copyright (C) 2000 - 2019, Intel Corp.
 *
 *****************************************************************************/

/*
 * Parse the AML and build an operation tree as most interpreters,
 * like Perl, do. Parsing is done by hand rather than with a YACC
 * generated parser to tightly constrain stack and dynamic memory
 * usage. At the same time, parsing is kept flexible and the code
 * fairly compact by parsing based on a list of AML opcode
 * templates in aml_op_info[]
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
#include "acinterp.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("psparse")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_get_opcode_size
 *
 * PARAMETERS:  opcode          - An AML opcode
 *
 * RETURN:      Size of the opcode, in bytes (1 or 2)
 *
 * DESCRIPTION: Get the size of the current opcode.
 *
 ******************************************************************************/
u32 acpi_ps_get_opcode_size(u32 opcode)
{

	/* Extended (2-byte) opcode if > 255 */

	if (opcode > 0x00FF) {
		return (2);
	}

	/* Otherwise, just a single byte opcode */

	return (1);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_peek_opcode
 *
 * PARAMETERS:  parser_state        - A parser state object
 *
 * RETURN:      Next AML opcode
 *
 * DESCRIPTION: Get next AML opcode (without incrementing AML pointer)
 *
 ******************************************************************************/

u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
{
	u8 *aml;
	u16 opcode;

	aml = parser_state->aml;
	opcode = (u16) ACPI_GET8(aml);

	if (opcode == AML_EXTENDED_PREFIX) {

		/* Extended opcode, get the second opcode byte */

		aml++;
		opcode = (u16) ((opcode << 8) | ACPI_GET8(aml));
	}

	return (opcode);
}
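
/*
 * Usage sketch (illustrative only): callers typically pair the two helpers
 * above to read the next opcode and then advance the AML pointer past it:
 *
 *	opcode = acpi_ps_peek_opcode(parser_state);
 *	parser_state->aml += acpi_ps_get_opcode_size(opcode);
 */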

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_complete_this_op
 *
 * PARAMETERS:  walk_state      - Current State
 *              op              - Op to complete
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform any cleanup at the completion of an Op.
 *
 ******************************************************************************/

acpi_status
acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
			 union acpi_parse_object *op)
{
	union acpi_parse_object *prev;
	union acpi_parse_object *next;
	const struct acpi_opcode_info *parent_info;
	union acpi_parse_object *replacement_op = NULL;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);

	/* Check for null Op, can happen if AML code is corrupt */

	if (!op) {
		return_ACPI_STATUS(AE_OK);	/* OK for now */
	}

	acpi_ex_stop_trace_opcode(op, walk_state);

	/* Delete this op and the subtree below it if asked to */

	if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
	     ACPI_PARSE_DELETE_TREE)
	    || (walk_state->op_info->class == AML_CLASS_ARGUMENT)) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Make sure that we only delete this subtree */

	if (op->common.parent) {
		prev = op->common.parent->common.value.arg;
		if (!prev) {

			/* Nothing more to do */

			goto cleanup;
		}

		/*
		 * Check if we need to replace the operator and its subtree
		 * with a return value op (placeholder op)
		 */
		parent_info =
		    acpi_ps_get_opcode_info(op->common.parent->common.
					    aml_opcode);

		switch (parent_info->class) {
		case AML_CLASS_CONTROL:

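			/*
			 * No placeholder is needed for a control opcode
			 * parent; the op is simply unlinked below and its
			 * subtree deleted.
			 */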
			break;

		case AML_CLASS_CREATE:
			/*
			 * These opcodes contain term_arg operands. The current
			 * op must be replaced by a placeholder return op
			 */
			replacement_op =
			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
					     op->common.aml);
			if (!replacement_op) {
				status = AE_NO_MEMORY;
			}
			break;

		case AML_CLASS_NAMED_OBJECT:
			/*
			 * These opcodes contain term_arg operands. The current
			 * op must be replaced by a placeholder return op
			 */
			if ((op->common.parent->common.aml_opcode ==
			     AML_REGION_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_DATA_REGION_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_BUFFER_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_PACKAGE_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_BANK_FIELD_OP)
			    || (op->common.parent->common.aml_opcode ==
				AML_VARIABLE_PACKAGE_OP)) {
				replacement_op =
				    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
						     op->common.aml);
				if (!replacement_op) {
					status = AE_NO_MEMORY;
				}
			} else
			    if ((op->common.parent->common.aml_opcode ==
				 AML_NAME_OP)
				&& (walk_state->pass_number <=
				    ACPI_IMODE_LOAD_PASS2)) {
				if ((op->common.aml_opcode == AML_BUFFER_OP)
				    || (op->common.aml_opcode == AML_PACKAGE_OP)
				    || (op->common.aml_opcode ==
					AML_VARIABLE_PACKAGE_OP)) {
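					/*
					 * Keep the original opcode and the
					 * raw Data/Length so the Buffer or
					 * Package argument is not lost when
					 * the subtree below is deleted.
					 */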
					replacement_op =
					    acpi_ps_alloc_op(op->common.
							     aml_opcode,
							     op->common.aml);
					if (!replacement_op) {
						status = AE_NO_MEMORY;
					} else {
						replacement_op->named.data =
						    op->named.data;
						replacement_op->named.length =
						    op->named.length;
					}
				}
			}
			break;

		default:

			replacement_op =
			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
					     op->common.aml);
			if (!replacement_op) {
				status = AE_NO_MEMORY;
			}
		}

		/* We must unlink this op from the parent tree */

		if (prev == op) {

			/* This op is the first in the list */

			if (replacement_op) {
				replacement_op->common.parent =
				    op->common.parent;
				replacement_op->common.value.arg = NULL;
				replacement_op->common.node = op->common.node;
				op->common.parent->common.value.arg =
				    replacement_op;
				replacement_op->common.next = op->common.next;
			} else {
				op->common.parent->common.value.arg =
				    op->common.next;
			}
		}

		/* Search the parent list */

		else
			while (prev) {

				/* Traverse all siblings in the parent's argument list */

				next = prev->common.next;
				if (next == op) {
					if (replacement_op) {
						replacement_op->common.parent =
						    op->common.parent;
						replacement_op->common.value.
						    arg = NULL;
						replacement_op->common.node =
						    op->common.node;
						prev->common.next =
						    replacement_op;
						replacement_op->common.next =
						    op->common.next;
						next = NULL;
					} else {
						prev->common.next =
						    op->common.next;
						next = NULL;
					}
				}
				prev = next;
			}
	}

cleanup:

	/* Now we can actually delete the subtree rooted at Op */

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_next_parse_state
 *
 * PARAMETERS:  walk_state          - Current state
 *              op                  - Current parse op
 *              callback_status     - Status from previous operation
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Update the parser state based upon the return exception from
 *              the parser callback.
 *
 ******************************************************************************/

acpi_status
acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
			 union acpi_parse_object *op,
			 acpi_status callback_status)
{
	struct acpi_parse_state *parser_state = &walk_state->parser_state;
	acpi_status status = AE_CTRL_PENDING;

	ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);

	switch (callback_status) {
	case AE_CTRL_TERMINATE:
		/*
		 * A control method was terminated via a RETURN statement.
		 * The walk of this method is complete.
		 */
		parser_state->aml = parser_state->aml_end;
		status = AE_CTRL_TERMINATE;
		break;

	case AE_CTRL_BREAK:

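		/*
		 * A Break was executed: resume at the enclosing While opcode
		 * and force its predicate value to FALSE so the loop exits.
		 */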
		parser_state->aml = walk_state->aml_last_while;
		walk_state->control_state->common.value = FALSE;
		status = AE_CTRL_BREAK;
		break;

	case AE_CTRL_CONTINUE:

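		/*
		 * A Continue was executed: resume at the enclosing While
		 * opcode so that its predicate is re-evaluated.
		 */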
		parser_state->aml = walk_state->aml_last_while;
		status = AE_CTRL_CONTINUE;
		break;

	case AE_CTRL_PENDING:

		parser_state->aml = walk_state->aml_last_while;
		break;

#if 0
	case AE_CTRL_SKIP:

		parser_state->aml = parser_state->scope->parse_scope.pkg_end;
		status = AE_OK;
		break;
#endif

	case AE_CTRL_TRUE:
		/*
		 * Predicate of an IF was true, and we are at the matching ELSE.
		 * Just close out this package
		 */
		parser_state->aml = acpi_ps_get_next_package_end(parser_state);
		status = AE_CTRL_PENDING;
		break;

	case AE_CTRL_FALSE:
		/*
		 * Either an IF/WHILE Predicate was false or we encountered a BREAK
		 * opcode. In both cases, we do not execute the rest of the
		 * package; we simply close out the parent (finishing the walk of
		 * this branch of the tree) and continue execution at the parent
		 * level.
		 */
		parser_state->aml = parser_state->scope->parse_scope.pkg_end;

		/* In the case of a BREAK, just force a predicate (if any) to FALSE */

		walk_state->control_state->common.value = FALSE;
		status = AE_CTRL_END;
		break;

	case AE_CTRL_TRANSFER:

		/* A method call (invocation) -- transfer control */

		status = AE_CTRL_TRANSFER;
		walk_state->prev_op = op;
		walk_state->method_call_op = op;
		walk_state->method_call_node =
		    (op->common.value.arg)->common.node;

		/* Will return value (if any) be used by the caller? */

		walk_state->return_used =
		    acpi_ds_is_result_used(op, walk_state);
		break;

	default:

		status = callback_status;
		if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
			status = AE_OK;
		}
		break;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_parse_aml
 *
 * PARAMETERS:  walk_state          - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse raw AML and return a tree of ops
 *
 ******************************************************************************/

acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
{
	acpi_status status;
	struct acpi_thread_state *thread;
	struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
	struct acpi_walk_state *previous_walk_state;

	ACPI_FUNCTION_TRACE(ps_parse_aml);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Entered with WalkState=%p Aml=%p size=%X\n",
			  walk_state, walk_state->parser_state.aml,
			  walk_state->parser_state.aml_size));

	if (!walk_state->parser_state.aml) {
		return_ACPI_STATUS(AE_BAD_ADDRESS);
	}

	/* Create and initialize a new thread state */

	thread = acpi_ut_create_thread_state();
	if (!thread) {
		if (walk_state->method_desc) {

			/* Executing a control method - additional cleanup */

			acpi_ds_terminate_control_method(walk_state->
							 method_desc,
							 walk_state);
		}

		acpi_ds_delete_walk_state(walk_state);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	walk_state->thread = thread;

	/*
	 * If executing a method, the starting sync_level is this method's
	 * sync_level
	 */
	if (walk_state->method_desc) {
		walk_state->thread->current_sync_level =
		    walk_state->method_desc->method.sync_level;
	}

	acpi_ds_push_walk_state(walk_state, thread);

	/*
	 * This global allows the AML debugger to get a handle to the currently
	 * executing control method.
	 */
	acpi_gbl_current_walk_list = thread;

	/*
	 * Execute the walk loop as long as there is a valid Walk State. This
	 * handles nested control method invocations without recursion.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));

	status = AE_OK;
	while (walk_state) {
		if (ACPI_SUCCESS(status)) {
			/*
			 * The parse_loop executes AML until the method terminates
			 * or calls another method.
			 */
			status = acpi_ps_parse_loop(walk_state);
		}

		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
				  "Completed one call to walk loop, %s State=%p\n",
				  acpi_format_exception(status), walk_state));

		if (walk_state->method_pathname && walk_state->method_is_nested) {

			/* Optional object evaluation log */

			ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
					      "%-26s: %*s%s\n",
					      " Exit nested method",
					      (walk_state->
					       method_nesting_depth + 1) * 3,
					      " ",
					      &walk_state->method_pathname[1]));

			ACPI_FREE(walk_state->method_pathname);
			walk_state->method_is_nested = FALSE;
		}
		if (status == AE_CTRL_TRANSFER) {
			/*
			 * A method call was detected.
			 * Transfer control to the called control method
			 */
			status =
			    acpi_ds_call_control_method(thread, walk_state,
							NULL);
			if (ACPI_FAILURE(status)) {
				status =
				    acpi_ds_method_error(status, walk_state);
			}

			/*
			 * If the transfer to the new method call worked, a new
			 * walk state was created -- get it
			 */
			walk_state = acpi_ds_get_current_walk_state(thread);
			continue;
		} else if (status == AE_CTRL_TERMINATE) {
			status = AE_OK;
		} else if ((status != AE_OK) && (walk_state->method_desc)) {

			/* Either the method parse or actual execution failed */

			acpi_ex_exit_interpreter();
			if (status == AE_ABORT_METHOD) {
				acpi_ns_print_node_pathname(walk_state->
							    method_node,
							    "Aborting method");
				acpi_os_printf("\n");
			} else {
				ACPI_ERROR_METHOD("Aborting method",
						  walk_state->method_node, NULL,
						  status);
			}
			acpi_ex_enter_interpreter();

			/* Check for possible multi-thread reentrancy problem */

			if ((status == AE_ALREADY_EXISTS) &&
			    (!(walk_state->method_desc->method.info_flags &
			       ACPI_METHOD_SERIALIZED))) {
				/*
				 * Method is not serialized and tried to create an object
				 * twice. The probable cause is that the method cannot
				 * handle reentrancy. Mark as "pending serialized" now, and
				 * then mark "serialized" when the last thread exits.
				 */
				walk_state->method_desc->method.info_flags |=
				    ACPI_METHOD_SERIALIZED_PENDING;
			}
		}

		/* We are done with this walk, move on to the parent if any */

		walk_state = acpi_ds_pop_walk_state(thread);

		/* Reset the current scope to the beginning of scope stack */

		acpi_ds_scope_stack_clear(walk_state);

		/*
		 * If we just returned from the execution of a control method or if we
		 * encountered an error during the method parse phase, there's lots of
		 * cleanup to do
		 */
		if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
		     ACPI_PARSE_EXECUTE &&
		     !(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) ||
		    (ACPI_FAILURE(status))) {
			acpi_ds_terminate_control_method(walk_state->
							 method_desc,
							 walk_state);
		}

		/* Delete this walk state and all linked control states */

		acpi_ps_cleanup_scope(&walk_state->parser_state);
		previous_walk_state = walk_state;

		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
				  "ReturnValue=%p, ImplicitValue=%p State=%p\n",
				  walk_state->return_desc,
				  walk_state->implicit_return_obj, walk_state));

		/* Check if we have restarted a preempted walk */

		walk_state = acpi_ds_get_current_walk_state(thread);
		if (walk_state) {
			if (ACPI_SUCCESS(status)) {
				/*
				 * There is another walk state, restart it.
				 * If the method return value is not used by
				 * the parent, the object is deleted
				 */
				if (!previous_walk_state->return_desc) {
					/*
					 * In slack mode execution, if there is no return value
					 * we should implicitly return zero (0) as a default value.
					 */
					if (acpi_gbl_enable_interpreter_slack &&
					    !previous_walk_state->
					    implicit_return_obj) {
						previous_walk_state->
						    implicit_return_obj =
						    acpi_ut_create_integer_object
						    ((u64) 0);
						if (!previous_walk_state->
						    implicit_return_obj) {
							return_ACPI_STATUS
							    (AE_NO_MEMORY);
						}
					}

					/* Restart the calling control method */

					status =
					    acpi_ds_restart_control_method
					    (walk_state,
					     previous_walk_state->
					     implicit_return_obj);
				} else {
					/*
					 * We have a valid return value, delete any implicit
					 * return value.
					 */
					acpi_ds_clear_implicit_return
					    (previous_walk_state);

					status =
					    acpi_ds_restart_control_method
					    (walk_state,
					     previous_walk_state->return_desc);
				}
				if (ACPI_SUCCESS(status)) {
					walk_state->walk_type |=
					    ACPI_WALK_METHOD_RESTART;
				}
			} else {
				/* On error, delete any return object or implicit return */

				acpi_ut_remove_reference(previous_walk_state->
							 return_desc);
				acpi_ds_clear_implicit_return
				    (previous_walk_state);
			}
		}

		/*
		 * Just completed a 1st-level method, save the final internal return
		 * value (if any)
		 */
		else if (previous_walk_state->caller_return_desc) {
			if (previous_walk_state->implicit_return_obj) {
				*(previous_walk_state->caller_return_desc) =
				    previous_walk_state->implicit_return_obj;
			} else {
				/* NULL if no return value */

				*(previous_walk_state->caller_return_desc) =
				    previous_walk_state->return_desc;
			}
		} else {
			if (previous_walk_state->return_desc) {

				/* Caller doesn't want it, must delete it */

				acpi_ut_remove_reference(previous_walk_state->
							 return_desc);
			}
			if (previous_walk_state->implicit_return_obj) {

				/* Caller doesn't want it, must delete it */

				acpi_ut_remove_reference(previous_walk_state->
							 implicit_return_obj);
			}
		}

		acpi_ds_delete_walk_state(previous_walk_state);
	}

	/* Normal exit */

	acpi_ex_release_all_mutexes(thread);
	acpi_ut_delete_generic_state(ACPI_CAST_PTR
				     (union acpi_generic_state, thread));
	acpi_gbl_current_walk_list = prev_walk_list;
	return_ACPI_STATUS(status);
}