1# Copyright (c) 2019, Nordic Semiconductor 2# SPDX-License-Identifier: BSD-3-Clause 3 4# Tip: You can view just the documentation with 'pydoc3 devicetree.dtlib' 5 6""" 7A library for extracting information from .dts (devicetree) files. See the 8documentation for the DT and Node classes for more information. 9 10The top-level entry point of the library is the DT class. DT.__init__() takes a 11.dts file to parse and a list of directories to search for any /include/d 12files. 13""" 14 15import collections 16import enum 17import errno 18import os 19import re 20import string 21import sys 22import textwrap 23from typing import (Any, Iterable, 24 NamedTuple, NoReturn, Optional, 25 TYPE_CHECKING, Union) 26 27# NOTE: tests/test_dtlib.py is the test suite for this library. 28 29class DTError(Exception): 30 "Exception raised for devicetree-related errors" 31 32class Node: 33 r""" 34 Represents a node in the devicetree ('node-name { ... };'). 35 36 These attributes are available on Node instances: 37 38 name: 39 The name of the node (a string). 40 41 filename: 42 The name of the .dts file where the node is defined 43 44 lineno: 45 The line number in the .dts file where the node starts. 46 47 unit_addr: 48 The portion after the '@' in the node's name, or the empty string if the 49 name has no '@' in it. 50 51 Note that this is a string. Run int(node.unit_addr, 16) to get an 52 integer. 53 54 props: 55 A dict that maps the properties defined on the node to 56 their values. 'props' is indexed by property name (a string), and values 57 are Property objects. 58 59 To convert property values to Python numbers or strings, use 60 dtlib.to_num(), dtlib.to_nums(), or dtlib.to_string(). 61 62 Property values are represented as 'bytes' arrays to support the full 63 generality of DTS, which allows assignments like 64 65 x = "foo", < 0x12345678 >, [ 9A ]; 66 67 This gives x the value b"foo\0\x12\x34\x56\x78\x9A". Numbers in DTS are 68 stored in big-endian format. 69 70 nodes: 71 A dict containing the subnodes of the node, indexed by name. 72 73 labels: 74 A list with all labels pointing to the node, in the same order as the 75 labels appear, but with duplicates removed. 76 77 'label_1: label_2: node { ... };' gives 'labels' the value 78 ["label_1", "label_2"]. 79 80 parent: 81 The parent Node of the node. 'None' for the root node. 82 83 path: 84 The path to the node as a string, e.g. "/foo/bar". 85 86 dt: 87 The DT instance this node belongs to. 88 """ 89 90 # 91 # Public interface 92 # 93 94 def __init__(self, name: str, parent: Optional["Node"], dt: "DT", filename: str, lineno: int): 95 """ 96 Node constructor. Not meant to be called directly by clients. 97 """ 98 # Remember to update DT.__deepcopy__() if you change this. 99 100 self._name = name 101 self._filename = filename 102 self._lineno = lineno 103 self.props: dict[str, Property] = {} 104 self.nodes: dict[str, Node] = {} 105 self.labels: list[str] = [] 106 self.parent = parent 107 self.dt = dt 108 109 self._omit_if_no_ref = False 110 self._is_referenced = False 111 112 if name.count("@") > 1: 113 dt._parse_error("multiple '@' in node name") 114 if not name == "/": 115 for char in name: 116 if char not in _nodename_chars: 117 dt._parse_error(f"{self.path}: bad character '{char}' " 118 "in node name") 119 120 @property 121 def name(self) -> str: 122 """ 123 See the class documentation. 124 """ 125 # Converted to a property to discourage renaming -- that has to be done 126 # via DT.move_node. 
127 return self._name 128 129 @property 130 def lineno(self) -> int: 131 """ 132 See the class documentation. 133 """ 134 return self._lineno 135 136 @property 137 def filename(self) -> str: 138 """ 139 See the class documentation. 140 """ 141 return self._filename 142 143 @property 144 def unit_addr(self) -> str: 145 """ 146 See the class documentation. 147 """ 148 return self.name.partition("@")[2] 149 150 @property 151 def path(self) -> str: 152 """ 153 See the class documentation. 154 """ 155 node_names = [] 156 157 # This dynamic computation is required to be able to move 158 # nodes in the DT class. 159 cur = self 160 while cur.parent: 161 node_names.append(cur.name) 162 cur = cur.parent 163 164 return "/" + "/".join(reversed(node_names)) 165 166 def node_iter(self) -> Iterable['Node']: 167 """ 168 Returns a generator for iterating over the node and its children, 169 recursively. 170 171 For example, this will iterate over all nodes in the tree (like 172 dt.node_iter()). 173 174 for node in dt.root.node_iter(): 175 ... 176 """ 177 yield self 178 for node in self.nodes.values(): 179 yield from node.node_iter() 180 181 def _get_prop(self, name: str) -> 'Property': 182 # Returns the property named 'name' on the node, creating it if it 183 # doesn't already exist 184 185 prop = self.props.get(name) 186 if not prop: 187 prop = Property(self, name) 188 self.props[name] = prop 189 return prop 190 191 def _del(self) -> None: 192 # Removes the node from the tree. When called on the root node, 193 # this method will leave it empty but still part of the tree. 194 195 if self.parent is None: 196 self.nodes.clear() 197 self.props.clear() 198 return 199 self.parent.nodes.pop(self.name) # type: ignore 200 201 def __str__(self): 202 """ 203 Returns a DTS representation of the node. Called automatically if the 204 node is print()ed. 205 """ 206 s = "".join(label + ": " for label in self.labels) 207 208 s += f"{self.name} {{\n" 209 210 for prop in self.props.values(): 211 s += "\t" + str(prop) + "\n" 212 213 for child in self.nodes.values(): 214 s += textwrap.indent(child.__str__(), "\t") + "\n" 215 216 s += "};" 217 218 return s 219 220 def __repr__(self): 221 """ 222 Returns some information about the Node instance. Called automatically 223 if the Node instance is evaluated. 224 """ 225 return f"<Node {self.path} in '{self.dt.filename}'>" 226 227# See Property.type 228class Type(enum.IntEnum): 229 EMPTY = 0 230 BYTES = 1 231 NUM = 2 232 NUMS = 3 233 STRING = 4 234 STRINGS = 5 235 PATH = 6 236 PHANDLE = 7 237 PHANDLES = 8 238 PHANDLES_AND_NUMS = 9 239 COMPOUND = 10 240 241class _MarkerType(enum.IntEnum): 242 # Types of markers in property values 243 244 # References 245 PATH = 0 # &foo 246 PHANDLE = 1 # <&foo> 247 LABEL = 2 # foo: <1 2 3> 248 249 # Start of data blocks of specific type 250 UINT8 = 3 # [00 01 02] (and also used for /incbin/) 251 UINT16 = 4 # /bits/ 16 <1 2 3> 252 UINT32 = 5 # <1 2 3> 253 UINT64 = 6 # /bits/ 64 <1 2 3> 254 STRING = 7 # "foo" 255 256class Property: 257 """ 258 Represents a property ('x = ...'). 259 260 These attributes are available on Property instances: 261 262 name: 263 The name of the property (a string). 264 265 filename: 266 The name of the .dts file where the property is defined 267 268 lineno: 269 The line number in the .dts file where the property starts. 270 271 value: 272 The value of the property, as a 'bytes' string. Numbers are stored in 273 big-endian format, and strings are null-terminated. 
Putting multiple 274 comma-separated values in an assignment (e.g., 'x = < 1 >, "foo"') will 275 concatenate the values. 276 277 See the to_*() methods for converting the value to other types. 278 279 type: 280 The type of the property, inferred from the syntax used in the 281 assignment. This is one of the following constants (with example 282 assignments): 283 284 Assignment | Property.type 285 ----------------------------+------------------------ 286 foo; | dtlib.Type.EMPTY 287 foo = []; | dtlib.Type.BYTES 288 foo = [01 02]; | dtlib.Type.BYTES 289 foo = /bits/ 8 <1>; | dtlib.Type.BYTES 290 foo = <1>; | dtlib.Type.NUM 291 foo = <>; | dtlib.Type.NUMS 292 foo = <1 2 3>; | dtlib.Type.NUMS 293 foo = <1 2>, <3>; | dtlib.Type.NUMS 294 foo = "foo"; | dtlib.Type.STRING 295 foo = "foo", "bar"; | dtlib.Type.STRINGS 296 foo = <&l>; | dtlib.Type.PHANDLE 297 foo = <&l1 &l2 &l3>; | dtlib.Type.PHANDLES 298 foo = <&l1 &l2>, <&l3>; | dtlib.Type.PHANDLES 299 foo = <&l1 1 2 &l2 3 4>; | dtlib.Type.PHANDLES_AND_NUMS 300 foo = <&l1 1 2>, <&l2 3 4>; | dtlib.Type.PHANDLES_AND_NUMS 301 foo = &l; | dtlib.Type.PATH 302 *Anything else* | dtlib.Type.COMPOUND 303 304 *Anything else* includes properties mixing phandle (<&label>) and node 305 path (&label) references with other data. 306 307 Data labels in the property value do not influence the type. 308 309 labels: 310 A list with all labels pointing to the property, in the same order as the 311 labels appear, but with duplicates removed. 312 313 'label_1: label2: x = ...' gives 'labels' the value 314 ["label_1", "label_2"]. 315 316 offset_labels: 317 A dictionary that maps any labels within the property's value to their 318 offset, in bytes. For example, 'x = < 0 label_1: 1 label_2: >' gives 319 'offset_labels' the value {"label_1": 4, "label_2": 8}. 320 321 Iteration order will match the order of the labels on Python versions 322 that preserve dict insertion order. 323 324 node: 325 The Node the property is on. 326 """ 327 328 # 329 # Public interface 330 # 331 332 def __init__(self, node: Node, name: str): 333 # Remember to update DT.__deepcopy__() if you change this. 334 335 if "@" in name: 336 node.dt._parse_error("'@' is only allowed in node names") 337 338 self.name = name 339 self.filename = "" 340 self.lineno = -1 341 self.value = b"" 342 self.labels: list[str] = [] 343 # We have to wait to set this until later, when we've got 344 # the entire tree. 345 self.offset_labels: dict[str, int] = {} 346 self.node: Node = node 347 348 self._label_offset_lst: list[tuple[str, int]] = [] 349 350 # A list of [offset, label, type] lists (sorted by offset), 351 # giving the locations of references within the value. 'type' 352 # is either _MarkerType.PATH, for a node path reference, 353 # _MarkerType.PHANDLE, for a phandle reference, or 354 # _MarkerType.LABEL, for a label on/within data. Node paths 355 # and phandles need to be patched in after parsing. 356 self._markers: list[list] = [] 357 358 @property 359 def type(self) -> Type: 360 """ 361 See the class documentation. 362 """ 363 # Data labels (e.g. 'foo = label: <3>') are irrelevant, so filter them 364 # out 365 types = [marker[1] for marker in self._markers 366 if marker[1] != _MarkerType.LABEL] 367 368 if not types: 369 return Type.EMPTY 370 371 if types == [_MarkerType.UINT8]: 372 return Type.BYTES 373 374 if types == [_MarkerType.UINT32]: 375 return Type.NUM if len(self.value) == 4 else Type.NUMS 376 377 # Treat 'foo = <1 2 3>, <4 5>, ...' 
as Type.NUMS too 378 if set(types) == {_MarkerType.UINT32}: 379 return Type.NUMS 380 381 if set(types) == {_MarkerType.STRING}: 382 return Type.STRING if len(types) == 1 else Type.STRINGS 383 384 if types == [_MarkerType.PATH]: 385 return Type.PATH 386 387 if (types == [_MarkerType.UINT32, _MarkerType.PHANDLE] 388 and len(self.value) == 4): 389 return Type.PHANDLE 390 391 if set(types) == {_MarkerType.UINT32, _MarkerType.PHANDLE}: 392 if len(self.value) == 4*types.count(_MarkerType.PHANDLE): 393 # Array with just phandles in it 394 return Type.PHANDLES 395 # Array with both phandles and numbers 396 return Type.PHANDLES_AND_NUMS 397 398 return Type.COMPOUND 399 400 def to_num(self, signed=False) -> int: 401 """ 402 Returns the value of the property as a number. 403 404 Raises DTError if the property was not assigned with this syntax (has 405 Property.type Type.NUM): 406 407 foo = < 1 >; 408 409 signed (default: False): 410 If True, the value will be interpreted as signed rather than 411 unsigned. 412 """ 413 if self.type is not Type.NUM: 414 _err(f"expected property '{self.name}' on {self.node.path} in " 415 f"{self.node.dt.filename} to be assigned with " 416 f"'{self.name} = < (number) >;', not '{self}'") 417 418 return int.from_bytes(self.value, "big", signed=signed) 419 420 def to_nums(self, signed=False) -> list[int]: 421 """ 422 Returns the value of the property as a list of numbers. 423 424 Raises DTError if the property was not assigned with this syntax (has 425 Property.type Type.NUM or Type.NUMS): 426 427 foo = < 1 2 ... >; 428 429 signed (default: False): 430 If True, the values will be interpreted as signed rather than 431 unsigned. 432 """ 433 if self.type not in (Type.NUM, Type.NUMS): 434 _err(f"expected property '{self.name}' on {self.node.path} in " 435 f"{self.node.dt.filename} to be assigned with " 436 f"'{self.name} = < (number) (number) ... >;', not '{self}'") 437 438 return [int.from_bytes(self.value[i:i + 4], "big", signed=signed) 439 for i in range(0, len(self.value), 4)] 440 441 def to_bytes(self) -> bytes: 442 """ 443 Returns the value of the property as a raw 'bytes', like 444 Property.value, except with added type checking. 445 446 Raises DTError if the property was not assigned with this syntax (has 447 Property.type Type.BYTES): 448 449 foo = [ 01 ... ]; 450 """ 451 if self.type is not Type.BYTES: 452 _err(f"expected property '{self.name}' on {self.node.path} " 453 f"in {self.node.dt.filename} to be assigned with " 454 f"'{self.name} = [ (byte) (byte) ... ];', not '{self}'") 455 456 return self.value 457 458 def to_string(self) -> str: 459 """ 460 Returns the value of the property as a string. 461 462 Raises DTError if the property was not assigned with this syntax (has 463 Property.type Type.STRING): 464 465 foo = "string"; 466 467 This function might also raise UnicodeDecodeError if the string is 468 not valid UTF-8. 469 """ 470 if self.type is not Type.STRING: 471 _err(f"expected property '{self.name}' on {self.node.path} " 472 f"in {self.node.dt.filename} to be assigned with " 473 f"'{self.name} = \"string\";', not '{self}'") 474 475 try: 476 ret = self.value.decode("utf-8")[:-1] # Strip null 477 except UnicodeDecodeError: 478 _err(f"value of property '{self.name}' ({self.value!r}) " 479 f"on {self.node.path} in {self.node.dt.filename} " 480 "is not valid UTF-8") 481 482 return ret # The separate 'return' appeases the type checker. 483 484 def to_strings(self) -> list[str]: 485 """ 486 Returns the value of the property as a list of strings. 
487 488 Raises DTError if the property was not assigned with this syntax (has 489 Property.type Type.STRING or Type.STRINGS): 490 491 foo = "string", "string", ... ; 492 493 Also raises DTError if any of the strings are not valid UTF-8. 494 """ 495 if self.type not in (Type.STRING, Type.STRINGS): 496 _err(f"expected property '{self.name}' on {self.node.path} in " 497 f"{self.node.dt.filename} to be assigned with " 498 f"'{self.name} = \"string\", \"string\", ... ;', not '{self}'") 499 500 try: 501 ret = self.value.decode("utf-8").split("\0")[:-1] 502 except UnicodeDecodeError: 503 _err(f"value of property '{self.name}' ({self.value!r}) " 504 f"on {self.node.path} in {self.node.dt.filename} " 505 "is not valid UTF-8") 506 507 return ret # The separate 'return' appeases the type checker. 508 509 def to_node(self) -> Node: 510 """ 511 Returns the Node the phandle in the property points to. 512 513 Raises DTError if the property was not assigned with this syntax (has 514 Property.type Type.PHANDLE). 515 516 foo = < &bar >; 517 """ 518 if self.type is not Type.PHANDLE: 519 _err(f"expected property '{self.name}' on {self.node.path} in " 520 f"{self.node.dt.filename} to be assigned with " 521 f"'{self.name} = < &foo >;', not '{self}'") 522 523 return self.node.dt.phandle2node[int.from_bytes(self.value, "big")] 524 525 def to_nodes(self) -> list[Node]: 526 """ 527 Returns a list with the Nodes the phandles in the property point to. 528 529 Raises DTError if the property value contains anything other than 530 phandles. All of the following are accepted: 531 532 foo = < > 533 foo = < &bar >; 534 foo = < &bar &baz ... >; 535 foo = < &bar ... >, < &baz ... >; 536 """ 537 def type_ok(): 538 if self.type in (Type.PHANDLE, Type.PHANDLES): 539 return True 540 # Also accept 'foo = < >;' 541 return self.type is Type.NUMS and not self.value 542 543 if not type_ok(): 544 _err(f"expected property '{self.name}' on {self.node.path} in " 545 f"{self.node.dt.filename} to be assigned with " 546 f"'{self.name} = < &foo &bar ... >;', not '{self}'") 547 548 return [self.node.dt.phandle2node[int.from_bytes(self.value[i:i + 4], 549 "big")] 550 for i in range(0, len(self.value), 4)] 551 552 def to_path(self) -> Node: 553 """ 554 Returns the Node referenced by the path stored in the property. 555 556 Raises DTError if the property was not assigned with either of these 557 syntaxes (has Property.type Type.PATH or Type.STRING): 558 559 foo = &bar; 560 foo = "/bar"; 561 562 For the second case, DTError is raised if the path does not exist. 563 """ 564 if self.type not in (Type.PATH, Type.STRING): 565 _err(f"expected property '{self.name}' on {self.node.path} in " 566 f"{self.node.dt.filename} to be assigned with either " 567 f"'{self.name} = &foo' or '{self.name} = \"/path/to/node\"', " 568 f"not '{self}'") 569 570 try: 571 path = self.value.decode("utf-8")[:-1] 572 except UnicodeDecodeError: 573 _err(f"value of property '{self.name}' ({self.value!r}) " 574 f"on {self.node.path} in {self.node.dt.filename} " 575 "is not valid UTF-8") 576 577 try: 578 ret = self.node.dt.get_node(path) 579 except DTError: 580 _err(f"property '{self.name}' on {self.node.path} in " 581 f"{self.node.dt.filename} points to the non-existent node " 582 f'"{path}"') 583 584 return ret # The separate 'return' appeases the type checker. 
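    # Illustrative sketch only (not part of the API): how the to_*()
    # conversion helpers above are typically combined with DT and Node.props.
    # The file name, node path, and property names below are hypothetical.
    #
    #   from devicetree import dtlib
    #
    #   dt = dtlib.DT("example.dts", include_path=["dts/include"])
    #   uart = dt.get_node("/soc/serial@1000")
    #   baud = uart.props["current-speed"].to_num()     # 'foo = < 1 >;' syntax
    #   compat = uart.props["compatible"].to_strings()  # string list syntax
    #   clk = uart.props["clocks"].to_node()            # 'foo = < &label >;' syntax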
585 586 def __str__(self): 587 s = "".join(label + ": " for label in self.labels) + self.name 588 if not self.value: 589 return s + ";" 590 591 s += " =" 592 593 for i, (pos, marker_type, ref) in enumerate(self._markers): 594 if i < len(self._markers) - 1: 595 next_marker = self._markers[i + 1] 596 else: 597 next_marker = None 598 599 # End of current marker 600 end = next_marker[0] if next_marker else len(self.value) 601 602 if marker_type is _MarkerType.STRING: 603 # end - 1 to strip off the null terminator 604 s += f' "{_decode_and_escape(self.value[pos:end - 1])}"' 605 if end != len(self.value): 606 s += "," 607 elif marker_type is _MarkerType.PATH: 608 s += " &" + ref 609 if end != len(self.value): 610 s += "," 611 else: 612 # <> or [] 613 614 if marker_type is _MarkerType.LABEL: 615 s += f" {ref}:" 616 elif marker_type is _MarkerType.PHANDLE: 617 s += " &" + ref 618 pos += 4 619 # Subtle: There might be more data between the phandle and 620 # the next marker, so we can't 'continue' here 621 else: # marker_type is _MarkerType.UINT* 622 elm_size = _TYPE_TO_N_BYTES[marker_type] 623 s += _N_BYTES_TO_START_STR[elm_size] 624 625 while pos != end: 626 num = int.from_bytes(self.value[pos:pos + elm_size], 627 "big") 628 if elm_size == 1: 629 s += f" {num:02X}" 630 else: 631 s += f" {hex(num)}" 632 633 pos += elm_size 634 635 if (pos != 0 636 and (not next_marker 637 or next_marker[1] 638 not in (_MarkerType.PHANDLE, _MarkerType.LABEL))): 639 640 s += _N_BYTES_TO_END_STR[elm_size] 641 if pos != len(self.value): 642 s += "," 643 644 return s + ";" 645 646 def __repr__(self): 647 return (f"<Property '{self.name}' at '{self.node.path}' in " 648 f"'{self.node.dt.filename}'>") 649 650 # 651 # Internal functions 652 # 653 654 def _add_marker(self, marker_type: _MarkerType, data: Any = None): 655 # Helper for registering markers in the value that are processed after 656 # parsing. See _fixup_props(). 'marker_type' identifies the type of 657 # marker, and 'data' has any optional data associated with the marker. 658 659 # len(self.value) gives the current offset. This function is called 660 # while the value is built. We use a list instead of a tuple to be able 661 # to fix up offsets later (they might increase if the value includes 662 # path references, e.g. 'foo = &bar, <3>;', which are expanded later). 663 self._markers.append([len(self.value), marker_type, data]) 664 665 # For phandle references, add a dummy value with the same length as a 666 # phandle. This is handy for the length check in _register_phandles(). 667 if marker_type is _MarkerType.PHANDLE: 668 self.value += b"\0\0\0\0" 669 670class _T(enum.IntEnum): 671 # Token IDs used by the DT lexer. 672 673 # These values must be contiguous and start from 1. 674 INCLUDE = 1 675 LINE = 2 676 STRING = 3 677 DTS_V1 = 4 678 PLUGIN = 5 679 MEMRESERVE = 6 680 BITS = 7 681 DEL_PROP = 8 682 DEL_NODE = 9 683 OMIT_IF_NO_REF = 10 684 LABEL = 11 685 CHAR_LITERAL = 12 686 REF = 13 687 INCBIN = 14 688 SKIP = 15 689 EOF = 16 690 691 # These values must be larger than the above contiguous range. 692 NUM = 17 693 PROPNODENAME = 18 694 MISC = 19 695 BYTE = 20 696 BAD = 21 697 698class _FileStackElt(NamedTuple): 699 # Used for maintaining the /include/ stack. 
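    # Each element records where to resume once an /include/d file has been
    # read to the end: the /include/ing file's name, the current line number,
    # its full contents, and the lexer position within them. See
    # _enter_file() and _leave_file().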
700 701 filename: str 702 lineno: int 703 contents: str 704 pos: int 705 706_TokVal = Union[int, str] 707 708class _Token(NamedTuple): 709 id: int 710 val: _TokVal 711 712 def __repr__(self): 713 id_repr = _T(self.id).name 714 return f'Token(id=_T.{id_repr}, val={repr(self.val)})' 715 716class DT: 717 """ 718 Represents a devicetree parsed from a .dts file (or from many files, if the 719 .dts file /include/s other files). Creating many instances of this class is 720 fine. The library has no global state. 721 722 These attributes are available on DT instances: 723 724 root: 725 A Node instance representing the root (/) node. 726 727 alias2node: 728 A dictionary that maps maps alias strings (from /aliases) to Node 729 instances 730 731 label2node: 732 A dictionary that maps each node label (a string) to the Node instance 733 for the node. 734 735 label2prop: 736 A dictionary that maps each property label (a string) to a Property 737 instance. 738 739 label2prop_offset: 740 A dictionary that maps each label (a string) within a property value 741 (e.g., 'x = label_1: < 1 label2: 2 >;') to a (prop, offset) tuple, where 742 'prop' is a Property instance and 'offset' the byte offset (0 for label_1 743 and 4 for label_2 in the example). 744 745 phandle2node: 746 A dictionary that maps each phandle (a number) to a Node instance. 747 748 memreserves: 749 A list of (labels, address, length) tuples for the /memreserve/s in the 750 .dts file, in the same order as they appear in the file. 751 752 'labels' is a possibly empty set with all labels preceding the memreserve 753 (e.g., 'label1: label2: /memreserve/ ...'). 'address' and 'length' are 754 numbers. 755 756 filename: 757 The filename passed to the DT constructor. 758 """ 759 760 # 761 # Public interface 762 # 763 764 def __init__(self, filename: Optional[str], include_path: Iterable[str] = (), 765 force: bool = False): 766 """ 767 Parses a DTS file to create a DT instance. Raises OSError if 'filename' 768 can't be opened, and DTError for any parse errors. 769 770 filename: 771 Path to the .dts file to parse. (If None, an empty devicetree 772 is created; this is unlikely to be what you want.) 773 774 include_path: 775 An iterable (e.g. list or tuple) containing paths to search for 776 /include/d and /incbin/'d files. By default, files are only looked up 777 relative to the .dts file that contains the /include/ or /incbin/. 778 779 force: 780 Try not to raise DTError even if the input tree has errors. 781 For experimental use; results not guaranteed. 782 """ 783 # Remember to update __deepcopy__() if you change this. 784 785 self._root: Optional[Node] = None 786 self.alias2node: dict[str, Node] = {} 787 self.label2node: dict[str, Node] = {} 788 self.label2prop: dict[str, Property] = {} 789 self.label2prop_offset: dict[str, tuple[Property, int]] = {} 790 self.phandle2node: dict[int, Node] = {} 791 self.memreserves: list[tuple[set[str], int, int]] = [] 792 self.filename = filename 793 794 self._force = force 795 796 if filename is not None: 797 self._parse_file(filename, include_path) 798 else: 799 self._include_path: list[str] = [] 800 801 @property 802 def root(self) -> Node: 803 """ 804 See the class documentation. 805 """ 806 # This is necessary because mypy can't tell that we never 807 # treat self._root as a non-None value until it's initialized 808 # properly in _parse_dt(). 
809 return self._root # type: ignore 810 811 def get_node(self, path: str) -> Node: 812 """ 813 Returns the Node instance for the node with path or alias 'path' (a 814 string). Raises DTError if the path or alias doesn't exist. 815 816 For example, both dt.get_node("/foo/bar") and dt.get_node("bar-alias") 817 will return the 'bar' node below: 818 819 /dts-v1/; 820 821 / { 822 foo { 823 bar_label: bar { 824 baz { 825 }; 826 }; 827 }; 828 829 aliases { 830 bar-alias = &bar-label; 831 }; 832 }; 833 834 Fetching subnodes via aliases is supported: 835 dt.get_node("bar-alias/baz") returns the 'baz' node. 836 """ 837 if path.startswith("/"): 838 return _root_and_path_to_node(self.root, path, path) 839 840 # Path does not start with '/'. First component must be an alias. 841 alias, _, rest = path.partition("/") 842 if alias not in self.alias2node: 843 _err(f"no alias '{alias}' found -- did you forget the leading " 844 "'/' in the node path?") 845 846 return _root_and_path_to_node(self.alias2node[alias], rest, path) 847 848 def has_node(self, path: str) -> bool: 849 """ 850 Returns True if the path or alias 'path' exists. See DT.get_node(). 851 """ 852 try: 853 self.get_node(path) 854 return True 855 except DTError: 856 return False 857 858 def move_node(self, node: Node, new_path: str): 859 """ 860 Move a node 'node' to a new path 'new_path'. The entire subtree 861 rooted at 'node' is moved along with it. 862 863 You cannot move the root node or provide a 'new_path' value 864 where a node already exists. This method raises an exception 865 in both cases. 866 867 As a restriction on the current implementation, the parent node 868 of the new path must exist. 869 """ 870 if node is self.root: 871 _err("the root node can't be moved") 872 873 if self.has_node(new_path): 874 _err(f"can't move '{node.path}' to '{new_path}': " 875 'destination node exists') 876 877 if not new_path.startswith('/'): 878 _err(f"path '{new_path}' doesn't start with '/'") 879 880 for component in new_path.split('/'): 881 for char in component: 882 if char not in _nodename_chars: 883 _err(f"new path '{new_path}': bad character '{char}'") 884 885 old_name = node.name 886 old_path = node.path 887 888 new_parent_path, _, new_name = new_path.rpartition('/') 889 if new_parent_path == '': 890 # '/foo'.rpartition('/') is ('', '/', 'foo'). 891 new_parent_path = '/' 892 if not self.has_node(new_parent_path): 893 _err(f"can't move '{old_path}' to '{new_path}': " 894 f"parent node '{new_parent_path}' doesn't exist") 895 new_parent = self.get_node(new_parent_path) 896 if TYPE_CHECKING: 897 assert new_parent is not None 898 assert node.parent is not None 899 900 del node.parent.nodes[old_name] 901 node._name = new_name 902 node.parent = new_parent 903 new_parent.nodes[new_name] = node 904 905 def node_iter(self) -> Iterable[Node]: 906 """ 907 Returns a generator for iterating over all nodes in the devicetree. 908 909 For example, this will print the name of each node that has a property 910 called 'foo': 911 912 for node in dt.node_iter(): 913 if "foo" in node.props: 914 print(node.name) 915 """ 916 yield from self.root.node_iter() 917 918 def __str__(self): 919 """ 920 Returns a DTS representation of the devicetree. Called automatically if 921 the DT instance is print()ed. 
922 """ 923 s = "/dts-v1/;\n\n" 924 925 if self.memreserves: 926 for labels, address, offset in self.memreserves: 927 # List the labels in a consistent order to help with testing 928 for label in labels: 929 s += f"{label}: " 930 s += f"/memreserve/ {address:#018x} {offset:#018x};\n" 931 s += "\n" 932 933 return s + str(self.root) 934 935 def __repr__(self): 936 """ 937 Returns some information about the DT instance. Called automatically if 938 the DT instance is evaluated. 939 """ 940 if self.filename: 941 return (f"DT(filename='{self.filename}', " 942 f"include_path={self._include_path})") 943 return super().__repr__() 944 945 def __deepcopy__(self, memo): 946 """ 947 Implements support for the standard library copy.deepcopy() 948 function on DT instances. 949 """ 950 951 # We need a new DT, obviously. Make a new, empty one. 952 ret = DT(None, (), self._force) 953 954 # Now allocate new Node objects for every node in self, to use 955 # in the new DT. Set their parents to None for now and leave 956 # them without any properties. We will recursively initialize 957 # copies of parents before copies of children next. 958 path2node_copy = { 959 node.path: Node(node.name, None, ret, node.filename, node.lineno) 960 for node in self.node_iter() 961 } 962 963 # Point each copy of a node to the copy of its parent and set up 964 # copies of each property. 965 # 966 # Share data when possible. For example, Property.value has 967 # type 'bytes', which is immutable. We therefore don't need a 968 # copy and can just point to the original data. 969 970 for node in self.node_iter(): 971 node_copy = path2node_copy[node.path] 972 973 parent = node.parent 974 if parent is not None: 975 node_copy.parent = path2node_copy[parent.path] 976 977 prop_name2prop_copy = { 978 prop.name: Property(node_copy, prop.name) 979 for prop in node.props.values() 980 } 981 for prop_name, prop_copy in prop_name2prop_copy.items(): 982 prop = node.props[prop_name] 983 prop_copy.value = prop.value 984 prop_copy.labels = prop.labels[:] 985 prop_copy.offset_labels = prop.offset_labels.copy() 986 prop_copy._label_offset_lst = prop._label_offset_lst[:] 987 prop_copy._markers = [marker[:] for marker in prop._markers] 988 prop_copy.filename = prop.filename 989 prop_copy.lineno = prop.lineno 990 node_copy.props = prop_name2prop_copy 991 992 node_copy.nodes = { 993 child_name: path2node_copy[child_node.path] 994 for child_name, child_node in node.nodes.items() 995 } 996 997 node_copy.labels = node.labels[:] 998 999 node_copy._omit_if_no_ref = node._omit_if_no_ref 1000 node_copy._is_referenced = node._is_referenced 1001 1002 # The copied nodes and properties are initialized, so 1003 # we can finish initializing the copied DT object now. 
1004 1005 ret._root = path2node_copy['/'] 1006 1007 def copy_node_lookup_table(attr_name): 1008 original = getattr(self, attr_name) 1009 copy = { 1010 key: path2node_copy[original[key].path] 1011 for key in original 1012 } 1013 setattr(ret, attr_name, copy) 1014 1015 copy_node_lookup_table('alias2node') 1016 copy_node_lookup_table('label2node') 1017 copy_node_lookup_table('phandle2node') 1018 1019 ret_label2prop = {} 1020 for label, prop in self.label2prop.items(): 1021 node_copy = path2node_copy[prop.node.path] 1022 prop_copy = node_copy.props[prop.name] 1023 ret_label2prop[label] = prop_copy 1024 ret.label2prop = ret_label2prop 1025 1026 ret_label2prop_offset = {} 1027 for label, prop_offset in self.label2prop_offset.items(): 1028 prop, offset = prop_offset 1029 node_copy = path2node_copy[prop.node.path] 1030 prop_copy = node_copy.props[prop.name] 1031 ret_label2prop_offset[label] = (prop_copy, offset) 1032 ret.label2prop_offset = ret_label2prop_offset 1033 1034 ret.memreserves = [ 1035 (set(memreserve[0]), memreserve[1], memreserve[2]) 1036 for memreserve in self.memreserves 1037 ] 1038 1039 ret.filename = self.filename 1040 1041 return ret 1042 1043 # 1044 # Parsing 1045 # 1046 1047 def _parse_file(self, filename: str, include_path: Iterable[str]): 1048 self._include_path = list(include_path) 1049 1050 with open(filename, encoding="utf-8") as f: 1051 self._file_contents = f.read() 1052 1053 self._tok_i = self._tok_end_i = 0 1054 self._filestack: list[_FileStackElt] = [] 1055 1056 self._lexer_state: int = _DEFAULT 1057 self._saved_token: Optional[_Token] = None 1058 1059 self._lineno: int = 1 1060 1061 self._parse_header() 1062 self._parse_memreserves() 1063 self._parse_dt() 1064 1065 self._register_phandles() 1066 self._fixup_props() 1067 self._register_aliases() 1068 self._remove_unreferenced() 1069 self._register_labels() 1070 1071 def _parse_header(self): 1072 # Parses /dts-v1/ (expected) and /plugin/ (unsupported) at the start of 1073 # files. There may be multiple /dts-v1/ at the start of a file. 1074 1075 has_dts_v1 = False 1076 1077 while self._peek_token().id == _T.DTS_V1: 1078 has_dts_v1 = True 1079 self._next_token() 1080 self._expect_token(";") 1081 # /plugin/ always comes after /dts-v1/ 1082 if self._peek_token().id == _T.PLUGIN: 1083 self._parse_error("/plugin/ is not supported") 1084 1085 if not has_dts_v1: 1086 self._parse_error("expected '/dts-v1/;' at start of file") 1087 1088 def _parse_memreserves(self): 1089 # Parses /memreserve/, which appears after /dts-v1/ 1090 1091 while True: 1092 # Labels before /memreserve/ 1093 labels = [] 1094 while self._peek_token().id == _T.LABEL: 1095 _append_no_dup(labels, self._next_token().val) 1096 1097 if self._peek_token().id == _T.MEMRESERVE: 1098 self._next_token() 1099 self.memreserves.append( 1100 (labels, self._eval_prim(), self._eval_prim())) 1101 self._expect_token(";") 1102 elif labels: 1103 self._parse_error("expected /memreserve/ after labels at " 1104 "beginning of file") 1105 else: 1106 return 1107 1108 def _parse_dt(self): 1109 # Top-level parsing loop 1110 1111 while True: 1112 tok = self._next_token() 1113 1114 if tok.val == "/": 1115 # '/ { ... };', the root node 1116 if not self._root: 1117 self._root = Node( 1118 name="/", parent=None, dt=self, filename=self.filename, lineno=self._lineno 1119 ) 1120 self._parse_node(self.root) 1121 1122 elif tok.id in (_T.LABEL, _T.REF): 1123 # '&foo { ... };' or 'label: &foo { ... };'. The C tools only 1124 # support a single label here too. 
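                # For example (with a hypothetical label 'uart0'),
                #
                #   mylabel: &uart0 { status = "okay"; };
                #
                # reopens the node that 'uart0' points to, adds/overrides its
                # properties, and also attaches 'mylabel' to it.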
1125 1126 if tok.id == _T.LABEL: 1127 label = tok.val 1128 tok = self._next_token() 1129 if tok.id != _T.REF: 1130 self._parse_error("expected label reference (&foo)") 1131 else: 1132 label = None 1133 1134 try: 1135 node = self._ref2node(tok.val) 1136 except DTError as e: 1137 self._parse_error(e) 1138 self._parse_node(node) 1139 1140 if label: 1141 _append_no_dup(node.labels, label) 1142 1143 elif tok.id == _T.DEL_NODE: 1144 self._next_ref2node()._del() 1145 self._expect_token(";") 1146 1147 elif tok.id == _T.OMIT_IF_NO_REF: 1148 self._next_ref2node()._omit_if_no_ref = True 1149 self._expect_token(";") 1150 1151 elif tok.id == _T.EOF: 1152 if not self._root: 1153 self._parse_error("no root node defined") 1154 return 1155 1156 else: 1157 self._parse_error("expected '/' or label reference (&foo)") 1158 1159 def _parse_node(self, node): 1160 # Parses the '{ ... };' part of 'node-name { ... };'. 1161 1162 # We need to track which child nodes were defined in this set 1163 # of curly braces in order to reject duplicate node names. 1164 current_child_names = set() 1165 1166 self._expect_token("{") 1167 while True: 1168 labels, omit_if_no_ref = self._parse_propnode_labels() 1169 tok = self._next_token() 1170 1171 if tok.id == _T.PROPNODENAME: 1172 if self._peek_token().val == "{": 1173 # '<tok> { ...', expect node 1174 1175 # Fetch the existing node if it already exists. This 1176 # happens when overriding nodes. 1177 child = node.nodes.get(tok.val) 1178 if child: 1179 if child.name in current_child_names: 1180 self._parse_error(f'{child.path}: duplicate node name') 1181 else: 1182 child = Node( 1183 name=tok.val, 1184 parent=node, 1185 dt=self, 1186 filename=self.filename, 1187 lineno=self._lineno, 1188 ) 1189 current_child_names.add(tok.val) 1190 1191 for label in labels: 1192 _append_no_dup(child.labels, label) 1193 1194 if omit_if_no_ref: 1195 child._omit_if_no_ref = True 1196 1197 node.nodes[child.name] = child 1198 self._parse_node(child) 1199 1200 else: 1201 # Not '<tok> { ...', expect property assignment 1202 1203 if omit_if_no_ref: 1204 self._parse_error( 1205 "/omit-if-no-ref/ can only be used on nodes") 1206 1207 prop = node._get_prop(tok.val) 1208 prop.filename = self.filename 1209 prop.lineno = self._lineno 1210 1211 if self._check_token("="): 1212 self._parse_assignment(prop) 1213 elif not self._check_token(";"): 1214 # ';' is for an empty property, like 'foo;' 1215 self._parse_error("expected '{', '=', or ';'") 1216 1217 for label in labels: 1218 _append_no_dup(prop.labels, label) 1219 1220 elif tok.id == _T.DEL_NODE: 1221 tok2 = self._next_token() 1222 if tok2.id != _T.PROPNODENAME: 1223 self._parse_error("expected node name") 1224 if tok2.val in node.nodes: 1225 node.nodes[tok2.val]._del() 1226 self._expect_token(";") 1227 1228 elif tok.id == _T.DEL_PROP: 1229 tok2 = self._next_token() 1230 if tok2.id != _T.PROPNODENAME: 1231 self._parse_error("expected property name") 1232 node.props.pop(tok2.val, None) 1233 self._expect_token(";") 1234 1235 elif tok.val == "}": 1236 self._expect_token(";") 1237 return 1238 1239 else: 1240 self._parse_error("expected node name, property name, or '}'") 1241 1242 def _parse_propnode_labels(self): 1243 # _parse_node() helpers for parsing labels and /omit-if-no-ref/s before 1244 # nodes and properties. Returns a (<label list>, <omit-if-no-ref bool>) 1245 # tuple. 
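        # For example, for the (hypothetical) input
        #
        #   /omit-if-no-ref/ lbl_1: lbl_2: foo { ... };
        #
        # this returns (["lbl_1", "lbl_2"], True), leaving 'foo' as the next
        # token.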
1246 1247 labels = [] 1248 omit_if_no_ref = False 1249 while True: 1250 tok = self._peek_token() 1251 if tok.id == _T.LABEL: 1252 _append_no_dup(labels, tok.val) 1253 elif tok.id == _T.OMIT_IF_NO_REF: 1254 omit_if_no_ref = True 1255 elif (labels or omit_if_no_ref) and tok.id != _T.PROPNODENAME: 1256 # Got something like 'foo: bar: }' 1257 self._parse_error("expected node or property name") 1258 else: 1259 return labels, omit_if_no_ref 1260 1261 self._next_token() 1262 1263 def _parse_assignment(self, prop): 1264 # Parses the right-hand side of property assignment 1265 # 1266 # prop: 1267 # 'Property' instance being assigned 1268 1269 # Remove any old value, path/phandle references, and in-value labels, 1270 # in case the property value is being overridden 1271 prop.value = b"" 1272 prop._markers = [] 1273 1274 while True: 1275 # Parse labels before the value (e.g., '..., label: < 0 >') 1276 self._parse_value_labels(prop) 1277 1278 tok = self._next_token() 1279 1280 if tok.val == "<": 1281 self._parse_cells(prop, 4) 1282 1283 elif tok.id == _T.BITS: 1284 n_bits = self._expect_num() 1285 if n_bits not in {8, 16, 32, 64}: 1286 self._parse_error("expected 8, 16, 32, or 64") 1287 self._expect_token("<") 1288 self._parse_cells(prop, n_bits//8) 1289 1290 elif tok.val == "[": 1291 self._parse_bytes(prop) 1292 1293 elif tok.id == _T.STRING: 1294 prop._add_marker(_MarkerType.STRING) 1295 prop.value += self._unescape(tok.val.encode("utf-8")) + b"\0" 1296 1297 elif tok.id == _T.REF: 1298 prop._add_marker(_MarkerType.PATH, tok.val) 1299 1300 elif tok.id == _T.INCBIN: 1301 self._parse_incbin(prop) 1302 1303 else: 1304 self._parse_error("malformed value") 1305 1306 # Parse labels after the value (e.g., '< 0 > label:, ...') 1307 self._parse_value_labels(prop) 1308 1309 tok = self._next_token() 1310 if tok.val == ";": 1311 return 1312 if tok.val == ",": 1313 continue 1314 self._parse_error("expected ';' or ','") 1315 1316 def _parse_cells(self, prop, n_bytes): 1317 # Parses '<...>' 1318 1319 prop._add_marker(_N_BYTES_TO_TYPE[n_bytes]) 1320 1321 while True: 1322 tok = self._peek_token() 1323 if tok.id == _T.REF: 1324 self._next_token() 1325 if n_bytes != 4: 1326 self._parse_error("phandle references are only allowed in " 1327 "arrays with 32-bit elements") 1328 prop._add_marker(_MarkerType.PHANDLE, tok.val) 1329 1330 elif tok.id == _T.LABEL: 1331 prop._add_marker(_MarkerType.LABEL, tok.val) 1332 self._next_token() 1333 1334 elif self._check_token(">"): 1335 return 1336 1337 else: 1338 # Literal value 1339 num = self._eval_prim() 1340 try: 1341 prop.value += num.to_bytes(n_bytes, "big") 1342 except OverflowError: 1343 try: 1344 # Try again as a signed number, in case it's negative 1345 prop.value += num.to_bytes(n_bytes, "big", signed=True) 1346 except OverflowError: 1347 self._parse_error( 1348 f"{num} does not fit in {8*n_bytes} bits") 1349 1350 def _parse_bytes(self, prop): 1351 # Parses '[ ... 
]' 1352 1353 prop._add_marker(_MarkerType.UINT8) 1354 1355 while True: 1356 tok = self._next_token() 1357 if tok.id == _T.BYTE: 1358 prop.value += tok.val.to_bytes(1, "big") 1359 1360 elif tok.id == _T.LABEL: 1361 prop._add_marker(_MarkerType.LABEL, tok.val) 1362 1363 elif tok.val == "]": 1364 return 1365 1366 else: 1367 self._parse_error("expected two-digit byte or ']'") 1368 1369 def _parse_incbin(self, prop): 1370 # Parses 1371 # 1372 # /incbin/ ("filename") 1373 # 1374 # and 1375 # 1376 # /incbin/ ("filename", <offset>, <size>) 1377 1378 prop._add_marker(_MarkerType.UINT8) 1379 1380 self._expect_token("(") 1381 1382 tok = self._next_token() 1383 if tok.id != _T.STRING: 1384 self._parse_error("expected quoted filename") 1385 filename = tok.val 1386 1387 tok = self._next_token() 1388 if tok.val == ",": 1389 offset = self._eval_prim() 1390 self._expect_token(",") 1391 size = self._eval_prim() 1392 self._expect_token(")") 1393 else: 1394 if tok.val != ")": 1395 self._parse_error("expected ',' or ')'") 1396 offset = None 1397 1398 try: 1399 with self._open(filename, "rb") as f: 1400 if offset is None: 1401 prop.value += f.read() 1402 else: 1403 f.seek(offset) 1404 prop.value += f.read(size) 1405 except OSError as e: 1406 self._parse_error(f"could not read '{filename}': {e}") 1407 1408 def _parse_value_labels(self, prop): 1409 # _parse_assignment() helper for parsing labels before/after each 1410 # comma-separated value 1411 1412 while True: 1413 tok = self._peek_token() 1414 if tok.id != _T.LABEL: 1415 return 1416 prop._add_marker(_MarkerType.LABEL, tok.val) 1417 self._next_token() 1418 1419 def _node_phandle(self, node): 1420 # Returns the phandle for Node 'node', creating a new phandle if the 1421 # node has no phandle, and fixing up the value for existing 1422 # self-referential phandles (which get set to b'\0\0\0\0' initially). 1423 # Self-referential phandles must be rewritten instead of recreated, so 1424 # that labels are preserved. 
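        # For example, a node written as 'foo: node { phandle = < &foo >; };'
        # reaches this point with the placeholder value b'\0\0\0\0', which is
        # overwritten below with a freshly allocated phandle number.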
1425 1426 if "phandle" in node.props: 1427 phandle_prop = node.props["phandle"] 1428 else: 1429 phandle_prop = Property(node, "phandle") 1430 phandle_prop._add_marker(_MarkerType.UINT32) # For displaying 1431 phandle_prop.value = b'\0\0\0\0' 1432 1433 if phandle_prop.value == b'\0\0\0\0': 1434 phandle_i = 1 1435 while phandle_i in self.phandle2node: 1436 phandle_i += 1 1437 self.phandle2node[phandle_i] = node 1438 1439 phandle_prop.value = phandle_i.to_bytes(4, "big") 1440 node.props["phandle"] = phandle_prop 1441 1442 return phandle_prop.value 1443 1444 # Expression evaluation 1445 1446 def _eval_prim(self): 1447 tok = self._peek_token() 1448 if tok.id in (_T.NUM, _T.CHAR_LITERAL): 1449 return self._next_token().val 1450 1451 tok = self._next_token() 1452 if tok.val != "(": 1453 self._parse_error("expected number or parenthesized expression") 1454 val = self._eval_ternary() 1455 self._expect_token(")") 1456 return val 1457 1458 def _eval_ternary(self): 1459 val = self._eval_or() 1460 if self._check_token("?"): 1461 if_val = self._eval_ternary() 1462 self._expect_token(":") 1463 else_val = self._eval_ternary() 1464 return if_val if val else else_val 1465 return val 1466 1467 def _eval_or(self): 1468 val = self._eval_and() 1469 while self._check_token("||"): 1470 val = 1 if self._eval_and() or val else 0 1471 return val 1472 1473 def _eval_and(self): 1474 val = self._eval_bitor() 1475 while self._check_token("&&"): 1476 val = 1 if self._eval_bitor() and val else 0 1477 return val 1478 1479 def _eval_bitor(self): 1480 val = self._eval_bitxor() 1481 while self._check_token("|"): 1482 val |= self._eval_bitxor() 1483 return val 1484 1485 def _eval_bitxor(self): 1486 val = self._eval_bitand() 1487 while self._check_token("^"): 1488 val ^= self._eval_bitand() 1489 return val 1490 1491 def _eval_bitand(self): 1492 val = self._eval_eq() 1493 while self._check_token("&"): 1494 val &= self._eval_eq() 1495 return val 1496 1497 def _eval_eq(self): 1498 val = self._eval_rela() 1499 while True: 1500 if self._check_token("=="): 1501 val = 1 if val == self._eval_rela() else 0 1502 elif self._check_token("!="): 1503 val = 1 if val != self._eval_rela() else 0 1504 else: 1505 return val 1506 1507 def _eval_rela(self): 1508 val = self._eval_shift() 1509 while True: 1510 if self._check_token("<"): 1511 val = 1 if val < self._eval_shift() else 0 1512 elif self._check_token(">"): 1513 val = 1 if val > self._eval_shift() else 0 1514 elif self._check_token("<="): 1515 val = 1 if val <= self._eval_shift() else 0 1516 elif self._check_token(">="): 1517 val = 1 if val >= self._eval_shift() else 0 1518 else: 1519 return val 1520 1521 def _eval_shift(self): 1522 val = self._eval_add() 1523 while True: 1524 if self._check_token("<<"): 1525 val <<= self._eval_add() 1526 elif self._check_token(">>"): 1527 val >>= self._eval_add() 1528 else: 1529 return val 1530 1531 def _eval_add(self): 1532 val = self._eval_mul() 1533 while True: 1534 if self._check_token("+"): 1535 val += self._eval_mul() 1536 elif self._check_token("-"): 1537 val -= self._eval_mul() 1538 else: 1539 return val 1540 1541 def _eval_mul(self): 1542 val = self._eval_unary() 1543 while True: 1544 if self._check_token("*"): 1545 val *= self._eval_unary() 1546 elif self._check_token("/"): 1547 denom = self._eval_unary() 1548 if not denom: 1549 self._parse_error("division by zero") 1550 val //= denom 1551 elif self._check_token("%"): 1552 denom = self._eval_unary() 1553 if not denom: 1554 self._parse_error("division by zero") 1555 val %= denom 1556 else: 1557 
return val 1558 1559 def _eval_unary(self): 1560 if self._check_token("-"): 1561 return -self._eval_unary() 1562 if self._check_token("~"): 1563 return ~self._eval_unary() 1564 if self._check_token("!"): 1565 return 0 if self._eval_unary() else 1 1566 return self._eval_prim() 1567 1568 # 1569 # Lexing 1570 # 1571 1572 def _check_token(self, val): 1573 if self._peek_token().val == val: 1574 self._next_token() 1575 return True 1576 return False 1577 1578 def _peek_token(self): 1579 if not self._saved_token: 1580 self._saved_token = self._next_token() 1581 return self._saved_token 1582 1583 def _next_token(self): 1584 if self._saved_token: 1585 tmp = self._saved_token 1586 self._saved_token = None 1587 return tmp 1588 1589 while True: 1590 tok_id = None 1591 1592 match = _token_re.match(self._file_contents, self._tok_end_i) 1593 if match: 1594 tok_id = match.lastindex 1595 if tok_id == _T.CHAR_LITERAL: 1596 val = self._unescape(match.group(tok_id).encode("utf-8")) 1597 if len(val) != 1: 1598 self._parse_error("character literals must be length 1") 1599 tok_val = ord(val) 1600 else: 1601 tok_val = match.group(tok_id) 1602 1603 elif self._lexer_state is _DEFAULT: 1604 match = _num_re.match(self._file_contents, self._tok_end_i) 1605 if match: 1606 tok_id = _T.NUM 1607 num_s = match.group(1) 1608 tok_val = int(num_s, 1609 16 if num_s.startswith(("0x", "0X")) else 1610 8 if num_s[0] == "0" else 1611 10) 1612 1613 elif self._lexer_state is _EXPECT_PROPNODENAME: 1614 match = _propnodename_re.match(self._file_contents, 1615 self._tok_end_i) 1616 if match: 1617 tok_id = _T.PROPNODENAME 1618 tok_val = match.group(1) 1619 self._lexer_state = _DEFAULT 1620 1621 else: # self._lexer_state is _EXPECT_BYTE 1622 match = _byte_re.match(self._file_contents, self._tok_end_i) 1623 if match: 1624 tok_id = _T.BYTE 1625 tok_val = int(match.group(), 16) 1626 1627 if not tok_id: 1628 match = _misc_re.match(self._file_contents, self._tok_end_i) 1629 if match: 1630 tok_id = _T.MISC 1631 tok_val = match.group() 1632 else: 1633 self._tok_i = self._tok_end_i 1634 # Could get here due to a node/property naming appearing in 1635 # an unexpected context as well as for bad characters in 1636 # files. Generate a token for it so that the error can 1637 # trickle up to some context where we can give a more 1638 # helpful error message. 
1639 return _Token(_T.BAD, "<unknown token>") 1640 1641 self._tok_i = match.start() 1642 self._tok_end_i = match.end() 1643 1644 if tok_id == _T.SKIP: 1645 self._lineno += tok_val.count("\n") 1646 continue 1647 1648 # /include/ is handled in the lexer in the C tools as well, and can 1649 # appear anywhere 1650 if tok_id == _T.INCLUDE: 1651 # Can have newlines between /include/ and the filename 1652 self._lineno += tok_val.count("\n") 1653 # Do this manual extraction instead of doing it in the regex so 1654 # that we can properly count newlines 1655 filename = tok_val[tok_val.find('"') + 1:-1] 1656 self._enter_file(filename) 1657 continue 1658 1659 if tok_id == _T.LINE: 1660 # #line directive 1661 self._lineno = int(tok_val.split()[0]) - 1 1662 self.filename = tok_val[tok_val.find('"') + 1:-1] 1663 continue 1664 1665 if tok_id == _T.EOF: 1666 if self._filestack: 1667 self._leave_file() 1668 continue 1669 return _Token(_T.EOF, "<EOF>") 1670 1671 # State handling 1672 1673 if (tok_id in (_T.DEL_PROP, _T.DEL_NODE, _T.OMIT_IF_NO_REF) 1674 or tok_val in ("{", ";")): 1675 1676 self._lexer_state = _EXPECT_PROPNODENAME 1677 1678 elif tok_val == "[": 1679 self._lexer_state = _EXPECT_BYTE 1680 1681 elif tok_id in (_T.MEMRESERVE, _T.BITS) or tok_val == "]": 1682 self._lexer_state = _DEFAULT 1683 1684 return _Token(tok_id, tok_val) 1685 1686 def _expect_token(self, tok_val): 1687 # Raises an error if the next token does not have the string value 1688 # 'tok_val'. Returns the token. 1689 1690 tok = self._next_token() 1691 if tok.val != tok_val: 1692 self._parse_error(f"expected '{tok_val}', not '{tok.val}'") 1693 1694 return tok 1695 1696 def _expect_num(self): 1697 # Raises an error if the next token is not a number. Returns the token. 1698 1699 tok = self._next_token() 1700 if tok.id != _T.NUM: 1701 self._parse_error("expected number") 1702 return tok.val 1703 1704 def _parse_error(self, s): 1705 # This works out for the first line of the file too, where rfind() 1706 # returns -1 1707 column = self._tok_i - self._file_contents.rfind("\n", 0, 1708 self._tok_i + 1) 1709 _err(f"{self.filename}:{self._lineno} (column {column}): " 1710 f"parse error: {s}") 1711 1712 def _enter_file(self, filename): 1713 # Enters the /include/d file 'filename', remembering the position in 1714 # the /include/ing file for later 1715 1716 self._filestack.append( 1717 _FileStackElt(self.filename, self._lineno, 1718 self._file_contents, self._tok_end_i)) 1719 1720 # Handle escapes in filenames, just for completeness 1721 filename = self._unescape(filename.encode("utf-8")) 1722 try: 1723 filename = filename.decode("utf-8") 1724 except UnicodeDecodeError: 1725 self._parse_error("filename is not valid UTF-8") 1726 1727 with self._open(filename, encoding="utf-8") as f: 1728 try: 1729 self._file_contents = f.read() 1730 except OSError as e: 1731 self._parse_error(e) 1732 1733 # Check for recursive /include/ 1734 for i, parent in enumerate(self._filestack): 1735 if filename == parent[0]: 1736 self._parse_error("recursive /include/:\n" + " ->\n".join( 1737 [f"{parent[0]}:{parent[1]}" 1738 for parent in self._filestack[i:]] + 1739 [filename])) 1740 1741 self.filename = f.name 1742 self._lineno = 1 1743 self._tok_end_i = 0 1744 1745 def _leave_file(self): 1746 # Leaves an /include/d file, returning to the file that /include/d it 1747 1748 self.filename, self._lineno, self._file_contents, self._tok_end_i = ( 1749 self._filestack.pop()) 1750 1751 def _next_ref2node(self): 1752 # Checks that the next token is a label/path reference and 
returns the 1753 # Node it points to. Only used during parsing, so uses _parse_error() 1754 # on errors to save some code in callers. 1755 1756 label = self._next_token() 1757 if label.id != _T.REF: 1758 self._parse_error( 1759 "expected label (&foo) or path (&{/foo/bar}) reference") 1760 try: 1761 return self._ref2node(label.val) 1762 except DTError as e: 1763 self._parse_error(e) 1764 1765 def _ref2node(self, s): 1766 # Returns the Node the label/path reference 's' points to 1767 1768 if s[0] == "{": 1769 # Path reference (&{/foo/bar}) 1770 path = s[1:-1] 1771 if not path.startswith("/"): 1772 _err(f"node path '{path}' does not start with '/'") 1773 # Will raise DTError if the path doesn't exist 1774 return _root_and_path_to_node(self.root, path, path) 1775 1776 # Label reference (&foo). 1777 1778 # label2node hasn't been filled in yet, and using it would get messy 1779 # when nodes are deleted 1780 for node in self.node_iter(): 1781 if s in node.labels: 1782 return node 1783 1784 _err(f"undefined node label '{s}'") 1785 1786 # 1787 # Post-processing 1788 # 1789 1790 def _register_phandles(self): 1791 # Registers any manually-inserted phandle properties in 1792 # self.phandle2node, so that we can avoid allocating any phandles from 1793 # that set. Also checks the format of the phandles and does misc. 1794 # sanity checking. 1795 1796 for node in self.node_iter(): 1797 phandle = node.props.get("phandle") 1798 if phandle: 1799 if len(phandle.value) != 4: 1800 _err(f"{node.path}: bad phandle length " 1801 f"({len(phandle.value)}), expected 4 bytes") 1802 1803 is_self_referential = False 1804 for marker in phandle._markers: 1805 _, marker_type, ref = marker 1806 if marker_type is _MarkerType.PHANDLE: 1807 # The phandle's value is itself a phandle reference 1808 if self._ref2node(ref) is node: 1809 # Alright to set a node's phandle equal to its own 1810 # phandle. It'll force a new phandle to be 1811 # allocated even if the node is otherwise 1812 # unreferenced. 1813 is_self_referential = True 1814 break 1815 1816 _err(f"{node.path}: {phandle.name} " 1817 "refers to another node") 1818 1819 # Could put on else on the 'for' above too, but keep it 1820 # somewhat readable 1821 if not is_self_referential: 1822 phandle_val = int.from_bytes(phandle.value, "big") 1823 1824 if phandle_val in {0, 0xFFFFFFFF}: 1825 _err(f"{node.path}: bad value {phandle_val:#010x} " 1826 f"for {phandle.name}") 1827 1828 if phandle_val in self.phandle2node: 1829 _err(f"{node.path}: duplicated phandle {phandle_val:#x} " 1830 "(seen before at " 1831 f"{self.phandle2node[phandle_val].path})") 1832 1833 self.phandle2node[phandle_val] = node 1834 1835 def _fixup_props(self): 1836 # Fills in node path and phandle references in property values, and 1837 # registers labels within values. This must be done after parsing, 1838 # since forwards references are allowed and nodes and properties might 1839 # be deleted. 1840 1841 for node in self.node_iter(): 1842 # The tuple() avoids a 'dictionary changed size during iteration' 1843 # error 1844 for prop in tuple(node.props.values()): 1845 # 'prev_pos' and 'pos' are indices in the unpatched 1846 # property value. The result is built up in 'res'. 
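                # For example (hypothetical label), for
                #
                #   foo = &bar, < 3 >;
                #
                # the PATH marker is replaced by the referenced node's full
                # path plus a null terminator, so the offsets of the markers
                # that follow it shift accordingly.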
1847 prev_pos = 0 1848 res = b"" 1849 1850 for marker in prop._markers: 1851 pos, marker_type, ref = marker 1852 1853 # Add data before the marker, reading from the unpatched 1854 # property value 1855 res += prop.value[prev_pos:pos] 1856 1857 # Fix the marker offset so that it's correct for the 1858 # patched property value, for later (not used in this 1859 # function). The offset might change due to path 1860 # references, which expand to something like "/foo/bar". 1861 marker[0] = len(res) 1862 1863 if marker_type is _MarkerType.LABEL: 1864 # This is a temporary format so that we can catch 1865 # duplicate references. prop._label_offset_lst is changed 1866 # to a dictionary that maps labels to offsets in 1867 # _register_labels(). 1868 _append_no_dup(prop._label_offset_lst, (ref, len(res))) 1869 elif marker_type in (_MarkerType.PATH, _MarkerType.PHANDLE): 1870 # Path or phandle reference 1871 try: 1872 ref_node = self._ref2node(ref) 1873 except DTError as e: 1874 _err(f"{prop.node.path}: {e}") 1875 1876 # For /omit-if-no-ref/ 1877 ref_node._is_referenced = True 1878 1879 if marker_type is _MarkerType.PATH: 1880 res += ref_node.path.encode("utf-8") + b'\0' 1881 else: # marker_type is PHANDLE 1882 res += self._node_phandle(ref_node) 1883 # Skip over the dummy phandle placeholder 1884 pos += 4 1885 1886 prev_pos = pos 1887 1888 # Store the final fixed-up value. Add the data after the last 1889 # marker. 1890 prop.value = res + prop.value[prev_pos:] 1891 1892 def _register_aliases(self): 1893 # Registers aliases from the /aliases node in self.alias2node. Also 1894 # checks the format of the alias properties. 1895 1896 # We copy this to self.alias2node at the end to avoid get_node() 1897 # looking up paths via other aliases while verifying aliases 1898 alias2node = {} 1899 1900 alias_re = re.compile("[0-9a-z-]+$") 1901 1902 aliases = self.root.nodes.get("aliases") 1903 if aliases: 1904 for prop in aliases.props.values(): 1905 if not alias_re.match(prop.name): 1906 _err(f"/aliases: alias property name '{prop.name}' " 1907 "should include only characters from [0-9a-z-]") 1908 1909 # Property.to_path() checks that the node exists, has 1910 # the right type, etc. Swallow errors for invalid 1911 # aliases with self._force. 
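                # A typical (hypothetical) alias property looks like
                #
                #   aliases { serial0 = &uart0; };
                #
                # and maps "serial0" to the node the 'uart0' label points to.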
1912 try: 1913 alias2node[prop.name] = prop.to_path() 1914 except DTError: 1915 if self._force: 1916 continue 1917 raise 1918 1919 self.alias2node = alias2node 1920 1921 def _remove_unreferenced(self): 1922 # Removes any unreferenced nodes marked with /omit-if-no-ref/ from the 1923 # tree 1924 1925 # tuple() is to avoid 'RuntimeError: dictionary changed size during 1926 # iteration' errors 1927 for node in tuple(self.node_iter()): 1928 if node._omit_if_no_ref and not node._is_referenced: 1929 node._del() 1930 1931 def _register_labels(self): 1932 # Checks for duplicate labels and registers labels in label2node, 1933 # label2prop, and label2prop_offset 1934 1935 label2things = collections.defaultdict(set) 1936 1937 # Register all labels and the nodes/props they point to in label2things 1938 for node in self.node_iter(): 1939 for label in node.labels: 1940 label2things[label].add(node) 1941 self.label2node[label] = node 1942 1943 for prop in node.props.values(): 1944 for label in prop.labels: 1945 label2things[label].add(prop) 1946 self.label2prop[label] = prop 1947 1948 for label, offset in prop._label_offset_lst: 1949 label2things[label].add((prop, offset)) 1950 self.label2prop_offset[label] = (prop, offset) 1951 1952 # See _fixup_props() 1953 prop.offset_labels = dict(prop._label_offset_lst) 1954 1955 for label, things in label2things.items(): 1956 if len(things) > 1: 1957 strings = [] 1958 for thing in things: 1959 if isinstance(thing, Node): 1960 strings.append(f"on {thing.path}") 1961 elif isinstance(thing, Property): 1962 strings.append(f"on property '{thing.name}' " 1963 f"of node {thing.node.path}") 1964 else: 1965 # Label within property value 1966 strings.append("in the value of property " 1967 f"'{thing[0].name}' of node " 1968 f"{thing[0].node.path}") 1969 1970 # Give consistent error messages to help with testing 1971 strings.sort() 1972 1973 _err(f"Label '{label}' appears " + " and ".join(strings)) 1974 1975 1976 # 1977 # Misc. 1978 # 1979 1980 def _unescape(self, b): 1981 # Replaces backslash escapes in the 'bytes' array 'b'. We can't do this at 1982 # the string level, because the result might not be valid UTF-8 when 1983 # octal/hex escapes are involved. 1984 1985 def sub(match): 1986 esc = match.group(1) 1987 if esc == b"a": return b"\a" 1988 if esc == b"b": return b"\b" 1989 if esc == b"t": return b"\t" 1990 if esc == b"n": return b"\n" 1991 if esc == b"v": return b"\v" 1992 if esc == b"f": return b"\f" 1993 if esc == b"r": return b"\r" 1994 1995 if esc[0] in b"01234567": 1996 # Octal escape 1997 try: 1998 return int(esc, 8).to_bytes(1, "big") 1999 except OverflowError: 2000 self._parse_error("octal escape out of range (> 255)") 2001 2002 if esc[0] == ord("x") and len(esc) > 1: 2003 # Hex escape 2004 return int(esc[1:], 16).to_bytes(1, "big") 2005 2006 # Return <char> as-is for other \<char> 2007 return esc[0].to_bytes(1, "big") 2008 2009 return _unescape_re.sub(sub, b) 2010 2011 def _open(self, filename, mode="r", **kwargs): 2012 # Wrapper around standard Python open(), accepting the same params. 2013 # But searches for a 'filename' file in the directory of the current 2014 # file and the include path. 
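        # Lookup order, as implemented below: (1) the directory of the file
        # currently being parsed, then (2) each directory in 'include_path',
        # in the order given.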

    #
    # Misc.
    #

    def _unescape(self, b):
        # Replaces backslash escapes in the 'bytes' array 'b'. We can't do this
        # at the string level, because the result might not be valid UTF-8 when
        # octal/hex escapes are involved.

        def sub(match):
            esc = match.group(1)
            if esc == b"a": return b"\a"
            if esc == b"b": return b"\b"
            if esc == b"t": return b"\t"
            if esc == b"n": return b"\n"
            if esc == b"v": return b"\v"
            if esc == b"f": return b"\f"
            if esc == b"r": return b"\r"

            if esc[0] in b"01234567":
                # Octal escape
                try:
                    return int(esc, 8).to_bytes(1, "big")
                except OverflowError:
                    self._parse_error("octal escape out of range (> 255)")

            if esc[0] == ord("x") and len(esc) > 1:
                # Hex escape
                return int(esc[1:], 16).to_bytes(1, "big")

            # Return <char> as-is for other \<char>
            return esc[0].to_bytes(1, "big")

        return _unescape_re.sub(sub, b)

    def _open(self, filename, mode="r", **kwargs):
        # Wrapper around standard Python open(), accepting the same params,
        # but searching for 'filename' both in the directory of the current
        # file and in the include path.

        # The C tools support specifying stdin with '-' too
        if filename == "-":
            return sys.stdin.buffer if "b" in mode else sys.stdin

        # Try the directory of the current file first
        dirname = os.path.dirname(self.filename)
        try:
            return open(os.path.join(dirname, filename), mode, **kwargs)
        except OSError as e:
            if e.errno != errno.ENOENT:
                self._parse_error(e)

        # Try each directory from the include path
        for path in self._include_path:
            try:
                return open(os.path.join(path, filename), mode, **kwargs)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    self._parse_error(e)
                continue

        self._parse_error(f"'{filename}' could not be found")

#
# Public functions
#

def to_num(data: bytes, length: Optional[int] = None,
           signed: bool = False) -> int:
    """
    Converts the 'bytes' array 'data' to a number. The value is expected to be
    in big-endian format, which is standard in devicetree.

    length (default: None):
      The expected length of the value in bytes, as a simple type check. If
      None, the length check is skipped.

    signed (default: False):
      If True, the value will be interpreted as signed rather than unsigned.
    """
    _check_is_bytes(data)
    if length is not None:
        _check_length_positive(length)
        if len(data) != length:
            _err(f"{data!r} is {len(data)} bytes long, expected {length}")

    return int.from_bytes(data, "big", signed=signed)

def to_nums(data: bytes, length: int = 4, signed: bool = False) -> list[int]:
    """
    Like Property.to_nums(), but takes an arbitrary 'bytes' array. The values
    are assumed to be in big-endian format, which is standard in devicetree.
    """
    _check_is_bytes(data)
    _check_length_positive(length)

    if len(data) % length:
        _err(f"{data!r} is {len(data)} bytes long, "
             f"expected a length that's a multiple of {length}")

    return [int.from_bytes(data[i:i + length], "big", signed=signed)
            for i in range(0, len(data), length)]
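
# Usage sketch for the two helpers above (illustrative only, not part of the
# module): both treat the input as big-endian, so for example
#
#     to_num(b"\x01\x02") == 0x0102
#     to_num(b"\xff\xfe", length=2, signed=True) == -2
#     to_nums(b"\x00\x00\x00\x01\x00\x00\x00\x02") == [1, 2]
#     to_nums(b"\x00\x01\x00\x02", length=2) == [1, 2]
#
# Passing a 'length' that doesn't match (for to_num) or doesn't divide (for
# to_nums) the data length raises DTError.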

#
# Private helpers
#

def _check_is_bytes(data):
    if not isinstance(data, bytes):
        _err(f"'{data}' has type '{type(data).__name__}', expected 'bytes'")

def _check_length_positive(length):
    if length < 1:
        _err("'length' must be greater than zero, was " + str(length))

def _append_no_dup(lst, elm):
    # Appends 'elm' to 'lst', but only if it isn't already in 'lst'. Lets us
    # preserve order, which a set() doesn't.

    if elm not in lst:
        lst.append(elm)

def _decode_and_escape(b):
    # Decodes the 'bytes' array 'b' as UTF-8 and backslash-escapes special
    # characters

    # Hacky but robust way to avoid double-escaping any '\' spit out by
    # 'backslashreplace'. bytes.translate() can't map to more than a single
    # byte, but str.translate() can map to more than one character, so it's
    # nice here. There's probably a nicer way to do this.
    return (b.decode("utf-8", "surrogateescape")
            .translate(_escape_table)
            .encode("utf-8", "surrogateescape")
            .decode("utf-8", "backslashreplace"))

def _root_and_path_to_node(cur, path, fullpath):
    # Returns the node pointed at by 'path', relative to the Node 'cur'. For
    # example, if 'cur' has path /foo/bar, and 'path' is "baz/qaz", then the
    # node with path /foo/bar/baz/qaz is returned. 'fullpath' is the path as
    # given in the .dts file, for error messages.

    for component in path.split("/"):
        # Collapse multiple / in a row, and allow a / at the end
        if not component:
            continue

        if component not in cur.nodes:
            _err(f"component '{component}' in path '{fullpath}' "
                 "does not exist")

        cur = cur.nodes[component]

    return cur

def _err(msg) -> NoReturn:
    raise DTError(msg)

_escape_table = str.maketrans({
    "\\": "\\\\",
    '"': '\\"',
    "\a": "\\a",
    "\b": "\\b",
    "\t": "\\t",
    "\n": "\\n",
    "\v": "\\v",
    "\f": "\\f",
    "\r": "\\r"})

# Lexer states
_DEFAULT = 0
_EXPECT_PROPNODENAME = 1
_EXPECT_BYTE = 2

_num_re = re.compile(r"(0[xX][0-9a-fA-F]+|[0-9]+)(?:ULL|UL|LL|U|L)?")

# A leading \ is allowed in property and node names, probably to allow weird
# node names that would clash with other stuff
_propnodename_re = re.compile(r"\\?([a-zA-Z0-9,._+*#?@-]+)")

# Node names are more restrictive than property names.
_nodename_chars = set(string.ascii_letters + string.digits + ',._+-@')

# Misc. tokens that are tried after a property/node name. This is important, as
# there's overlap with the allowed characters in names.
_misc_re = re.compile(
    "|".join(re.escape(pat) for pat in (
        "==", "!=", "!", "=", ",", ";", "+", "-", "*", "/", "%", "~", "?", ":",
        "^", "(", ")", "{", "}", "[", "]", "<<", "<=", "<", ">>", ">=", ">",
        "||", "|", "&&", "&")))

_byte_re = re.compile(r"[0-9a-fA-F]{2}")

# Matches a backslash escape within a 'bytes' array. Captures the 'c' part of
# '\c', where c might be a single character or an octal/hex escape.
_unescape_re = re.compile(br'\\([0-7]{1,3}|x[0-9A-Fa-f]{1,2}|.)')
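
# Illustrative note on the regexes above (not from the original source): as a
# rough sketch of how they behave, _num_re.match("0x1000UL").group(1) gives
# "0x1000" (the integer suffix is dropped), _propnodename_re captures a name
# without any leading backslash, and _unescape_re matches the escape sequences
# handled by DT._unescape(), e.g. the "\x41" and "\101" escapes in a
# double-quoted string both decode to b"A".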

def _init_tokens():
    # Builds a (<token 1>)|(<token 2>)|... regex and returns it. The
    # way this is constructed makes the token's value as an int appear
    # in match.lastindex after a match.

    # Each pattern must have exactly one capturing group, which can capture any
    # part of the pattern. This makes match.lastindex match the token type.
    # _Token.val is based on the captured string.
    token_spec = {
        _T.INCLUDE: r'(/include/\s*"(?:[^\\"]|\\.)*")',
        # #line directive or GCC linemarker
        _T.LINE:
        r'^#(?:line)?[ \t]+([0-9]+[ \t]+"(?:[^\\"]|\\.)*")(?:[ \t]+[0-9]+){0,4}',

        _T.STRING: r'"((?:[^\\"]|\\.)*)"',
        _T.DTS_V1: r"(/dts-v1/)",
        _T.PLUGIN: r"(/plugin/)",
        _T.MEMRESERVE: r"(/memreserve/)",
        _T.BITS: r"(/bits/)",
        _T.DEL_PROP: r"(/delete-property/)",
        _T.DEL_NODE: r"(/delete-node/)",
        _T.OMIT_IF_NO_REF: r"(/omit-if-no-ref/)",
        _T.LABEL: r"([a-zA-Z_][a-zA-Z0-9_]*):",
        _T.CHAR_LITERAL: r"'((?:[^\\']|\\.)*)'",
        _T.REF: r"&([a-zA-Z_][a-zA-Z0-9_]*|{[a-zA-Z0-9,._+*#?@/-]*})",
        _T.INCBIN: r"(/incbin/)",
        # Whitespace, C comments, and C++ comments
        _T.SKIP: r"(\s+|(?:/\*(?:.|\n)*?\*/)|//.*$)",
        # Return a token for end-of-file so that the parsing code can
        # always assume that there are more tokens when looking
        # ahead. This simplifies things.
        _T.EOF: r"(\Z)",
    }

    # MULTILINE is needed for C++ comments and #line directives
    return re.compile("|".join(token_spec[tok_id] for tok_id in
                               range(1, _T.EOF + 1)),
                      re.MULTILINE | re.ASCII)

_token_re = _init_tokens()

_TYPE_TO_N_BYTES = {
    _MarkerType.UINT8: 1,
    _MarkerType.UINT16: 2,
    _MarkerType.UINT32: 4,
    _MarkerType.UINT64: 8,
}

_N_BYTES_TO_TYPE = {
    1: _MarkerType.UINT8,
    2: _MarkerType.UINT16,
    4: _MarkerType.UINT32,
    8: _MarkerType.UINT64,
}

_N_BYTES_TO_START_STR = {
    1: " [",
    2: " /bits/ 16 <",
    4: " <",
    8: " /bits/ 64 <",
}

_N_BYTES_TO_END_STR = {
    1: " ]",
    2: " >",
    4: " >",
    8: " >",
}
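
# Illustrative note (not from the original source): the four tables above map
# between element sizes and DTS syntax, so that, for example, a value parsed
# from '/bits/ 16 <1 2>' keeps 2-byte elements (_MarkerType.UINT16) and is
# rendered back with ' /bits/ 16 <' ... ' >', while plain byte arrays use
# ' [' ... ' ]'.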