1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Functions for working with the Flattened Device Tree data format
4 *
5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
6 * benh@kernel.crashing.org
7 */
8
9 #define pr_fmt(fmt) "OF: fdt: " fmt
10
11 #include <linux/crash_dump.h>
12 #include <linux/crc32.h>
13 #include <linux/kernel.h>
14 #include <linux/initrd.h>
15 #include <linux/memblock.h>
16 #include <linux/mutex.h>
17 #include <linux/of.h>
18 #include <linux/of_fdt.h>
19 #include <linux/of_reserved_mem.h>
20 #include <linux/sizes.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/slab.h>
24 #include <linux/libfdt.h>
25 #include <linux/debugfs.h>
26 #include <linux/serial_core.h>
27 #include <linux/sysfs.h>
28 #include <linux/random.h>
29 #include <linux/kmemleak.h>
30
31 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
32 #include <asm/page.h>
33
34 #include "of_private.h"
35
36 /*
37 * of_fdt_limit_memory - limit the number of regions in the /memory node
38 * @limit: maximum entries
39 *
40 * Adjust the flattened device tree to have at most 'limit' number of
41 * memory entries in the /memory node. This function may be called
42 * any time after initial_boot_params is set.
43 */
44 void __init of_fdt_limit_memory(int limit)
45 {
46 int memory;
47 int len;
48 const void *val;
49 int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
50 int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
51 const __be32 *addr_prop;
52 const __be32 *size_prop;
53 int root_offset;
54 int cell_size;
55
56 root_offset = fdt_path_offset(initial_boot_params, "/");
57 if (root_offset < 0)
58 return;
59
60 addr_prop = fdt_getprop(initial_boot_params, root_offset,
61 "#address-cells", NULL);
62 if (addr_prop)
63 nr_address_cells = fdt32_to_cpu(*addr_prop);
64
65 size_prop = fdt_getprop(initial_boot_params, root_offset,
66 "#size-cells", NULL);
67 if (size_prop)
68 nr_size_cells = fdt32_to_cpu(*size_prop);
69
70 cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
71
72 memory = fdt_path_offset(initial_boot_params, "/memory");
73 if (memory > 0) {
74 val = fdt_getprop(initial_boot_params, memory, "reg", &len);
75 if (len > limit*cell_size) {
76 len = limit*cell_size;
77 pr_debug("Limiting number of entries to %d\n", limit);
78 fdt_setprop(initial_boot_params, memory, "reg", val,
79 len);
80 }
81 }
82 }
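/*
 * Worked example (cell counts are assumptions for illustration, not from
 * any particular platform): with #address-cells = <2> and #size-cells = <2>,
 * each /memory "reg" entry occupies cell_size = 4 * (2 + 2) = 16 bytes, so
 * of_fdt_limit_memory(4) truncates the property to at most 64 bytes, i.e.
 * four base/size pairs.
 */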
83
84 static bool of_fdt_device_is_available(const void *blob, unsigned long node)
85 {
86 const char *status = fdt_getprop(blob, node, "status", NULL);
87
88 if (!status)
89 return true;
90
91 if (!strcmp(status, "ok") || !strcmp(status, "okay"))
92 return true;
93
94 return false;
95 }
96
97 static void *unflatten_dt_alloc(void **mem, unsigned long size,
98 unsigned long align)
99 {
100 void *res;
101
102 *mem = PTR_ALIGN(*mem, align);
103 res = *mem;
104 *mem += size;
105
106 return res;
107 }
108
109 static void populate_properties(const void *blob,
110 int offset,
111 void **mem,
112 struct device_node *np,
113 const char *nodename,
114 bool dryrun)
115 {
116 struct property *pp, **pprev = NULL;
117 int cur;
118 bool has_name = false;
119
120 pprev = &np->properties;
121 for (cur = fdt_first_property_offset(blob, offset);
122 cur >= 0;
123 cur = fdt_next_property_offset(blob, cur)) {
124 const __be32 *val;
125 const char *pname;
126 u32 sz;
127
128 val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
129 if (!val) {
130 pr_warn("Cannot locate property at 0x%x\n", cur);
131 continue;
132 }
133
134 if (!pname) {
135 pr_warn("Cannot find property name at 0x%x\n", cur);
136 continue;
137 }
138
139 if (!strcmp(pname, "name"))
140 has_name = true;
141
142 pp = unflatten_dt_alloc(mem, sizeof(struct property),
143 __alignof__(struct property));
144 if (dryrun)
145 continue;
146
147 /* We accept flattened tree phandles either in
148 * ePAPR-style "phandle" properties, or the
149 * legacy "linux,phandle" properties. If both
150 * appear and have different values, things
151 * will get weird. Don't do that.
152 */
153 if (!strcmp(pname, "phandle") ||
154 !strcmp(pname, "linux,phandle")) {
155 if (!np->phandle)
156 np->phandle = be32_to_cpup(val);
157 }
158
159 /* And we process the "ibm,phandle" property
160 * used in pSeries dynamic device tree
161 * stuff
162 */
163 if (!strcmp(pname, "ibm,phandle"))
164 np->phandle = be32_to_cpup(val);
165
166 pp->name = (char *)pname;
167 pp->length = sz;
168 pp->value = (__be32 *)val;
169 *pprev = pp;
170 pprev = &pp->next;
171 }
172
173 /* With version 0x10 we may not have the name property,
174 * recreate it here from the unit name if absent
175 */
176 if (!has_name) {
177 const char *p = nodename, *ps = p, *pa = NULL;
178 int len;
179
180 while (*p) {
181 if ((*p) == '@')
182 pa = p;
183 else if ((*p) == '/')
184 ps = p + 1;
185 p++;
186 }
187
188 if (pa < ps)
189 pa = p;
190 len = (pa - ps) + 1;
191 pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
192 __alignof__(struct property));
193 if (!dryrun) {
194 pp->name = "name";
195 pp->length = len;
196 pp->value = pp + 1;
197 *pprev = pp;
198 memcpy(pp->value, ps, len - 1);
199 ((char *)pp->value)[len - 1] = 0;
200 pr_debug("fixed up name for %s -> %s\n",
201 nodename, (char *)pp->value);
202 }
203 }
204 }
205
206 static int populate_node(const void *blob,
207 int offset,
208 void **mem,
209 struct device_node *dad,
210 struct device_node **pnp,
211 bool dryrun)
212 {
213 struct device_node *np;
214 const char *pathp;
215 int len;
216
217 pathp = fdt_get_name(blob, offset, &len);
218 if (!pathp) {
219 *pnp = NULL;
220 return len;
221 }
222
223 len++;
224
225 np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
226 __alignof__(struct device_node));
227 if (!dryrun) {
228 char *fn;
229 of_node_init(np);
230 np->full_name = fn = ((char *)np) + sizeof(*np);
231
232 memcpy(fn, pathp, len);
233
234 if (dad != NULL) {
235 np->parent = dad;
236 np->sibling = dad->child;
237 dad->child = np;
238 }
239 }
240
241 populate_properties(blob, offset, mem, np, pathp, dryrun);
242 if (!dryrun) {
243 np->name = of_get_property(np, "name", NULL);
244 if (!np->name)
245 np->name = "<NULL>";
246 }
247
248 *pnp = np;
249 return 0;
250 }
251
252 static void reverse_nodes(struct device_node *parent)
253 {
254 struct device_node *child, *next;
255
256 /* Recurse depth-first into the children */
257 child = parent->child;
258 while (child) {
259 reverse_nodes(child);
260
261 child = child->sibling;
262 }
263
264 /* Reverse the nodes in the child list */
265 child = parent->child;
266 parent->child = NULL;
267 while (child) {
268 next = child->sibling;
269
270 child->sibling = parent->child;
271 parent->child = child;
272 child = next;
273 }
274 }
275
276 /**
277 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
278 * @blob: The parent device tree blob
279 * @mem: Memory chunk to use for allocating device nodes and properties
280 * @dad: Parent struct device_node
281 * @nodepp: The device_node tree created by the call
282 *
283 * Return: The size of unflattened device tree or error code
284 */
285 static int unflatten_dt_nodes(const void *blob,
286 void *mem,
287 struct device_node *dad,
288 struct device_node **nodepp)
289 {
290 struct device_node *root;
291 int offset = 0, depth = 0, initial_depth = 0;
292 #define FDT_MAX_DEPTH 64
293 struct device_node *nps[FDT_MAX_DEPTH];
294 void *base = mem;
295 bool dryrun = !base;
296 int ret;
297
298 if (nodepp)
299 *nodepp = NULL;
300
301 /*
302 * We're unflattening a device sub-tree if @dad is valid. There are
303 * possibly multiple nodes in the first level of depth. We need to
304 * set @depth to 1 to make fdt_next_node() happy, as it bails
305 * immediately when a negative @depth is found. Otherwise, the device
306 * nodes except the first one won't be unflattened successfully.
307 */
308 if (dad)
309 depth = initial_depth = 1;
310
311 root = dad;
312 nps[depth] = dad;
313
314 for (offset = 0;
315 offset >= 0 && depth >= initial_depth;
316 offset = fdt_next_node(blob, offset, &depth)) {
317 if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
318 continue;
319
320 if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
321 !of_fdt_device_is_available(blob, offset))
322 continue;
323
324 ret = populate_node(blob, offset, &mem, nps[depth],
325 &nps[depth+1], dryrun);
326 if (ret < 0)
327 return ret;
328
329 if (!dryrun && nodepp && !*nodepp)
330 *nodepp = nps[depth+1];
331 if (!dryrun && !root)
332 root = nps[depth+1];
333 }
334
335 if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
336 pr_err("Error %d processing FDT\n", offset);
337 return -EINVAL;
338 }
339
340 /*
341 * Reverse the child list. Some drivers assume the node order matches
342 * the .dts node order.
343 */
344 if (!dryrun)
345 reverse_nodes(root);
346
347 return mem - base;
348 }
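/*
 * unflatten_dt_nodes() is meant for the two-pass scheme used by
 * __unflatten_device_tree() below: a dry run with @mem == NULL only
 * computes the required size, then a second call fills the allocated
 * chunk. A minimal sketch of that pattern ("blob" and "dt_alloc" stand
 * for whatever the caller provides):
 *
 *	size = unflatten_dt_nodes(blob, NULL, NULL, NULL);
 *	mem  = dt_alloc(size, __alignof__(struct device_node));
 *	unflatten_dt_nodes(blob, mem, NULL, &root);
 */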
349
350 /**
351 * __unflatten_device_tree - create tree of device_nodes from flat blob
352 * @blob: The blob to expand
353 * @dad: Parent device node
354 * @mynodes: The device_node tree created by the call
355 * @dt_alloc: An allocator that provides a virtual address to memory
356 * for the resulting tree
357 * @detached: if true set OF_DETACHED on @mynodes
358 *
359 * unflattens a device-tree, creating the tree of struct device_node. It also
360 * fills the "name" and "type" pointers of the nodes so the normal device-tree
361 * walking functions can be used.
362 *
363 * Return: NULL on failure or the memory chunk containing the unflattened
364 * device tree on success.
365 */
366 void *__unflatten_device_tree(const void *blob,
367 struct device_node *dad,
368 struct device_node **mynodes,
369 void *(*dt_alloc)(u64 size, u64 align),
370 bool detached)
371 {
372 int size;
373 void *mem;
374 int ret;
375
376 if (mynodes)
377 *mynodes = NULL;
378
379 pr_debug(" -> unflatten_device_tree()\n");
380
381 if (!blob) {
382 pr_debug("No device tree pointer\n");
383 return NULL;
384 }
385
386 pr_debug("Unflattening device tree:\n");
387 pr_debug("magic: %08x\n", fdt_magic(blob));
388 pr_debug("size: %08x\n", fdt_totalsize(blob));
389 pr_debug("version: %08x\n", fdt_version(blob));
390
391 if (fdt_check_header(blob)) {
392 pr_err("Invalid device tree blob header\n");
393 return NULL;
394 }
395
396 /* First pass, scan for size */
397 size = unflatten_dt_nodes(blob, NULL, dad, NULL);
398 if (size <= 0)
399 return NULL;
400
401 size = ALIGN(size, 4);
402 pr_debug(" size is %d, allocating...\n", size);
403
404 /* Allocate memory for the expanded device tree */
405 mem = dt_alloc(size + 4, __alignof__(struct device_node));
406 if (!mem)
407 return NULL;
408
409 memset(mem, 0, size);
410
411 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
412
413 pr_debug(" unflattening %p...\n", mem);
414
415 /* Second pass, do actual unflattening */
416 ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
417
418 if (be32_to_cpup(mem + size) != 0xdeadbeef)
419 pr_warn("End of tree marker overwritten: %08x\n",
420 be32_to_cpup(mem + size));
421
422 if (ret <= 0)
423 return NULL;
424
425 if (detached && mynodes && *mynodes) {
426 of_node_set_flag(*mynodes, OF_DETACHED);
427 pr_debug("unflattened tree is detached\n");
428 }
429
430 pr_debug(" <- unflatten_device_tree()\n");
431 return mem;
432 }
433
434 static void *kernel_tree_alloc(u64 size, u64 align)
435 {
436 return kzalloc(size, GFP_KERNEL);
437 }
438
439 static DEFINE_MUTEX(of_fdt_unflatten_mutex);
440
441 /**
442 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
443 * @blob: Flat device tree blob
444 * @dad: Parent device node
445 * @mynodes: The device tree created by the call
446 *
447 * unflattens the device-tree passed by the firmware, creating the
448 * tree of struct device_node. It also fills the "name" and "type"
449 * pointers of the nodes so the normal device-tree walking functions
450 * can be used.
451 *
452 * Return: NULL on failure or the memory chunk containing the unflattened
453 * device tree on success.
454 */
455 void *of_fdt_unflatten_tree(const unsigned long *blob,
456 struct device_node *dad,
457 struct device_node **mynodes)
458 {
459 void *mem;
460
461 mutex_lock(&of_fdt_unflatten_mutex);
462 mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
463 true);
464 mutex_unlock(&of_fdt_unflatten_mutex);
465
466 return mem;
467 }
468 EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
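/*
 * Example usage (a sketch only; "my_fdt_blob" is a hypothetical pointer to a
 * valid flattened device tree owned by the caller):
 *
 *	struct device_node *root = NULL;
 *	void *mem = of_fdt_unflatten_tree(my_fdt_blob, NULL, &root);
 *
 *	if (!mem)
 *		pr_err("failed to unflatten blob\n");
 */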
469
470 /* Everything below here references initial_boot_params directly. */
471 int __initdata dt_root_addr_cells;
472 int __initdata dt_root_size_cells;
473
474 void *initial_boot_params __ro_after_init;
475
476 #ifdef CONFIG_OF_EARLY_FLATTREE
477
478 static u32 of_fdt_crc32;
479
480 static int __init early_init_dt_reserve_memory(phys_addr_t base,
481 phys_addr_t size, bool nomap)
482 {
483 if (nomap) {
484 /*
485 * If the memory is already reserved (by another region), we
486 * should not allow it to be marked nomap, but don't worry
487 * if the region isn't memory as it won't be mapped.
488 */
489 if (memblock_overlaps_region(&memblock.memory, base, size) &&
490 memblock_is_region_reserved(base, size))
491 return -EBUSY;
492
493 return memblock_mark_nomap(base, size);
494 }
495 return memblock_reserve(base, size);
496 }
497
498 /*
499 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
500 */
501 static int __init __reserved_mem_reserve_reg(unsigned long node,
502 const char *uname)
503 {
504 int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
505 phys_addr_t base, size;
506 int len;
507 const __be32 *prop;
508 int first = 1;
509 bool nomap;
510
511 prop = of_get_flat_dt_prop(node, "reg", &len);
512 if (!prop)
513 return -ENOENT;
514
515 if (len && len % t_len != 0) {
516 pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
517 uname);
518 return -EINVAL;
519 }
520
521 nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
522
523 while (len >= t_len) {
524 base = dt_mem_next_cell(dt_root_addr_cells, &prop);
525 size = dt_mem_next_cell(dt_root_size_cells, &prop);
526
527 if (size &&
528 early_init_dt_reserve_memory(base, size, nomap) == 0) {
529 pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
530 uname, &base, (unsigned long)(size / SZ_1M));
531 if (!nomap)
532 kmemleak_alloc_phys(base, size, 0);
533 }
534 else
535 pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
536 uname, &base, (unsigned long)(size / SZ_1M));
537
538 len -= t_len;
539 if (first) {
540 fdt_reserved_mem_save_node(node, uname, base, size);
541 first = 0;
542 }
543 }
544 return 0;
545 }
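/*
 * Worked example (assumed cell counts): with dt_root_addr_cells = 2 and
 * dt_root_size_cells = 2, t_len is 16 bytes, so a property of
 * reg = <0x0 0x80000000 0x0 0x00100000> describes exactly one 1 MiB region
 * at 0x80000000 and is consumed in a single pass of the loop above.
 */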
546
547 /*
548 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
549 * provided in /reserved-memory match the values used for the root node, and
550 * that a "ranges" property is present
551 */
552 static int __init __reserved_mem_check_root(unsigned long node)
553 {
554 const __be32 *prop;
555
556 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
557 if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
558 return -EINVAL;
559
560 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
561 if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
562 return -EINVAL;
563
564 prop = of_get_flat_dt_prop(node, "ranges", NULL);
565 if (!prop)
566 return -EINVAL;
567 return 0;
568 }
569
570 /*
571 * fdt_scan_reserved_mem() - scan the /reserved-memory node for reserved memory regions
572 */
573 static int __init fdt_scan_reserved_mem(void)
574 {
575 int node, child;
576 const void *fdt = initial_boot_params;
577
578 node = fdt_path_offset(fdt, "/reserved-memory");
579 if (node < 0)
580 return -ENODEV;
581
582 if (__reserved_mem_check_root(node) != 0) {
583 pr_err("Reserved memory: unsupported node format, ignoring\n");
584 return -EINVAL;
585 }
586
587 fdt_for_each_subnode(child, fdt, node) {
588 const char *uname;
589 int err;
590
591 if (!of_fdt_device_is_available(fdt, child))
592 continue;
593
594 uname = fdt_get_name(fdt, child, NULL);
595
596 err = __reserved_mem_reserve_reg(child, uname);
597 if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
598 fdt_reserved_mem_save_node(child, uname, 0, 0);
599 }
600 return 0;
601 }
602
603 /*
604 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
605 *
606 * This function reserves the memory occupied by an elf core header
607 * described in the device tree. This region contains all the
608 * information about the primary kernel's core image and is used by a dump
609 * capture kernel to access the primary kernel's system memory.
610 */
611 static void __init fdt_reserve_elfcorehdr(void)
612 {
613 if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
614 return;
615
616 if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
617 pr_warn("elfcorehdr is overlapped\n");
618 return;
619 }
620
621 memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
622
623 pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
624 elfcorehdr_size >> 10, elfcorehdr_addr);
625 }
626
627 /**
628 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
629 *
630 * This function reserves memory regions described in the device tree for
631 * exclusive device use. It should be called by arch-specific code
632 * once the early allocator (i.e. memblock) has been fully activated.
633 */
634 void __init early_init_fdt_scan_reserved_mem(void)
635 {
636 int n;
637 u64 base, size;
638
639 if (!initial_boot_params)
640 return;
641
642 /* Process header /memreserve/ fields */
643 for (n = 0; ; n++) {
644 fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
645 if (!size)
646 break;
647 memblock_reserve(base, size);
648 }
649
650 fdt_scan_reserved_mem();
651 fdt_reserve_elfcorehdr();
652 fdt_init_reserved_mem();
653 }
654
655 /**
656 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
657 */
658 void __init early_init_fdt_reserve_self(void)
659 {
660 if (!initial_boot_params)
661 return;
662
663 /* Reserve the dtb region */
664 memblock_reserve(__pa(initial_boot_params),
665 fdt_totalsize(initial_boot_params));
666 }
667
668 /**
669 * of_scan_flat_dt - scan flattened tree blob and call callback on each node.
670 * @it: callback function
671 * @data: context data pointer
672 *
673 * This function is used to scan the flattened device-tree; at boot it is
674 * typically used to extract the memory information before the tree can
675 * be unflattened.
676 */
677 int __init of_scan_flat_dt(int (*it)(unsigned long node,
678 const char *uname, int depth,
679 void *data),
680 void *data)
681 {
682 const void *blob = initial_boot_params;
683 const char *pathp;
684 int offset, rc = 0, depth = -1;
685
686 if (!blob)
687 return 0;
688
689 for (offset = fdt_next_node(blob, -1, &depth);
690 offset >= 0 && depth >= 0 && !rc;
691 offset = fdt_next_node(blob, offset, &depth)) {
692
693 pathp = fdt_get_name(blob, offset, NULL);
694 rc = it(offset, pathp, depth, data);
695 }
696 return rc;
697 }
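/*
 * Illustrative scan callback (a hypothetical helper, not part of this file).
 * Returning a non-zero value from the callback stops the scan:
 *
 *	static int __init find_chosen(unsigned long node, const char *uname,
 *				      int depth, void *data)
 *	{
 *		if (depth != 1 || strcmp(uname, "chosen") != 0)
 *			return 0;
 *		*(unsigned long *)data = node;
 *		return 1;
 *	}
 *
 *	unsigned long chosen = 0;
 *
 *	of_scan_flat_dt(find_chosen, &chosen);
 */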
698
699 /**
700 * of_scan_flat_dt_subnodes - scan sub-nodes of a node and call callback on each.
701 * @parent: parent node
702 * @it: callback function
703 * @data: context data pointer
704 *
705 * This function is used to scan sub-nodes of a node.
706 */
707 int __init of_scan_flat_dt_subnodes(unsigned long parent,
708 int (*it)(unsigned long node,
709 const char *uname,
710 void *data),
711 void *data)
712 {
713 const void *blob = initial_boot_params;
714 int node;
715
716 fdt_for_each_subnode(node, blob, parent) {
717 const char *pathp;
718 int rc;
719
720 pathp = fdt_get_name(blob, node, NULL);
721 rc = it(node, pathp, data);
722 if (rc)
723 return rc;
724 }
725 return 0;
726 }
727
728 /**
729 * of_get_flat_dt_subnode_by_name - get the subnode by given name
730 *
731 * @node: the parent node
732 * @uname: the name of the subnode
733 * Return: offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
734 */
735
736 int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
737 {
738 return fdt_subnode_offset(initial_boot_params, node, uname);
739 }
740
741 /*
742 * of_get_flat_dt_root - find the root node in the flat blob
743 */
744 unsigned long __init of_get_flat_dt_root(void)
745 {
746 return 0;
747 }
748
749 /*
750 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
751 *
752 * This function can be used within an of_scan_flat_dt() callback to get
753 * access to properties.
754 */
755 const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
756 int *size)
757 {
758 return fdt_getprop(initial_boot_params, node, name, size);
759 }
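/*
 * Example (sketch): reading a single-cell property from within a scan
 * callback, where "node" is the offset handed to the callback and
 * "addr_cells" is a caller-provided variable:
 *
 *	int len;
 *	const __be32 *prop = of_get_flat_dt_prop(node, "#address-cells", &len);
 *
 *	if (prop && len >= (int)sizeof(__be32))
 *		addr_cells = be32_to_cpup(prop);
 */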
760
761 /**
762 * of_fdt_is_compatible - Return true if given node from the given blob has
763 * compat in its compatible list
764 * @blob: A device tree blob
765 * @node: node to test
766 * @compat: compatible string to compare with compatible list.
767 *
768 * Return: a non-zero value on match with smaller values returned for more
769 * specific compatible values.
770 */
771 static int of_fdt_is_compatible(const void *blob,
772 unsigned long node, const char *compat)
773 {
774 const char *cp;
775 int cplen;
776 unsigned long l, score = 0;
777
778 cp = fdt_getprop(blob, node, "compatible", &cplen);
779 if (cp == NULL)
780 return 0;
781 while (cplen > 0) {
782 score++;
783 if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
784 return score;
785 l = strlen(cp) + 1;
786 cp += l;
787 cplen -= l;
788 }
789
790 return 0;
791 }
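/*
 * Scoring example (hypothetical strings): for a node with
 * compatible = "vendor,board", "vendor,soc", matching "vendor,board"
 * returns 1 and matching "vendor,soc" returns 2; the lower score marks
 * the more specific (earlier) entry.
 */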
792
793 /**
794 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
795 * @node: node to test
796 * @compat: compatible string to compare with compatible list.
797 */
798 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
799 {
800 return of_fdt_is_compatible(initial_boot_params, node, compat);
801 }
802
803 /*
804 * of_flat_dt_match - Return true if node matches a list of compatible values
805 */
806 static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
807 {
808 unsigned int tmp, score = 0;
809
810 if (!compat)
811 return 0;
812
813 while (*compat) {
814 tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
815 if (tmp && (score == 0 || (tmp < score)))
816 score = tmp;
817 compat++;
818 }
819
820 return score;
821 }
822
823 /*
824 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
825 */
826 uint32_t __init of_get_flat_dt_phandle(unsigned long node)
827 {
828 return fdt_get_phandle(initial_boot_params, node);
829 }
830
831 const char * __init of_flat_dt_get_machine_name(void)
832 {
833 const char *name;
834 unsigned long dt_root = of_get_flat_dt_root();
835
836 name = of_get_flat_dt_prop(dt_root, "model", NULL);
837 if (!name)
838 name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
839 return name;
840 }
841
842 /**
843 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
844 *
845 * @default_match: A machine specific ptr to return in case of no match.
846 * @get_next_compat: callback function to return next compatible match table.
847 *
848 * Iterate through machine match tables to find the best match for the machine
849 * compatible string in the FDT.
850 */
851 const void * __init of_flat_dt_match_machine(const void *default_match,
852 const void * (*get_next_compat)(const char * const**))
853 {
854 const void *data = NULL;
855 const void *best_data = default_match;
856 const char *const *compat;
857 unsigned long dt_root;
858 unsigned int best_score = ~1, score = 0;
859
860 dt_root = of_get_flat_dt_root();
861 while ((data = get_next_compat(&compat))) {
862 score = of_flat_dt_match(dt_root, compat);
863 if (score > 0 && score < best_score) {
864 best_data = data;
865 best_score = score;
866 }
867 }
868 if (!best_data) {
869 const char *prop;
870 int size;
871
872 pr_err("\n unrecognized device tree list:\n[ ");
873
874 prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
875 if (prop) {
876 while (size > 0) {
877 printk("'%s' ", prop);
878 size -= strlen(prop) + 1;
879 prop += strlen(prop) + 1;
880 }
881 }
882 printk("]\n\n");
883 return NULL;
884 }
885
886 pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
887
888 return best_data;
889 }
890
891 static void __early_init_dt_declare_initrd(unsigned long start,
892 unsigned long end)
893 {
894 /* ARM64 would cause a BUG to occur here when CONFIG_DEBUG_VM is
895 * enabled since __va() is called too early. ARM64 does make use
896 * of phys_initrd_start/phys_initrd_size so we can skip this
897 * conversion.
898 */
899 if (!IS_ENABLED(CONFIG_ARM64)) {
900 initrd_start = (unsigned long)__va(start);
901 initrd_end = (unsigned long)__va(end);
902 initrd_below_start_ok = 1;
903 }
904 }
905
906 /**
907 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
908 * @node: reference to node containing initrd location ('chosen')
909 */
910 static void __init early_init_dt_check_for_initrd(unsigned long node)
911 {
912 u64 start, end;
913 int len;
914 const __be32 *prop;
915
916 if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
917 return;
918
919 pr_debug("Looking for initrd properties... ");
920
921 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
922 if (!prop)
923 return;
924 start = of_read_number(prop, len/4);
925
926 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
927 if (!prop)
928 return;
929 end = of_read_number(prop, len/4);
930 if (start > end)
931 return;
932
933 __early_init_dt_declare_initrd(start, end);
934 phys_initrd_start = start;
935 phys_initrd_size = end - start;
936
937 pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
938 }
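/*
 * Example /chosen properties consumed above (addresses are illustrative
 * only): linux,initrd-start = <0x48000000> and linux,initrd-end = <0x48800000>
 * describe an 8 MiB initrd starting at physical address 0x48000000.
 */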
939
940 /**
941 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
942 * tree
943 * @node: reference to node containing elfcorehdr location ('chosen')
944 */
945 static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
946 {
947 const __be32 *prop;
948 int len;
949
950 if (!IS_ENABLED(CONFIG_CRASH_DUMP))
951 return;
952
953 pr_debug("Looking for elfcorehdr property... ");
954
955 prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
956 if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
957 return;
958
959 elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
960 elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
961
962 pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
963 elfcorehdr_addr, elfcorehdr_size);
964 }
965
966 static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
967
968 /*
969 * The main usage of linux,usable-memory-range is for the crash dump kernel.
970 * Originally there was a single usable-memory region. Now there may be
971 * two regions, a low region and a high region.
972 * To stay compatible with existing user-space and older kdump, the low
973 * region is always the last range of linux,usable-memory-range, if it exists.
974 */
975 #define MAX_USABLE_RANGES 2
976
977 /**
978 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
979 * location from flat tree
980 */
981 void __init early_init_dt_check_for_usable_mem_range(void)
982 {
983 struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
984 const __be32 *prop, *endp;
985 int len, i;
986 unsigned long node = chosen_node_offset;
987
988 if ((long)node < 0)
989 return;
990
991 pr_debug("Looking for usable-memory-range property... ");
992
993 prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
994 if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
995 return;
996
997 endp = prop + (len / sizeof(__be32));
998 for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
999 rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
1000 rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
1001
1002 pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
1003 i, &rgn[i].base, &rgn[i].size);
1004 }
1005
1006 memblock_cap_memory_range(rgn[0].base, rgn[0].size);
1007 for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
1008 memblock_add(rgn[i].base, rgn[i].size);
1009 }
1010
1011 #ifdef CONFIG_SERIAL_EARLYCON
1012
1013 int __init early_init_dt_scan_chosen_stdout(void)
1014 {
1015 int offset;
1016 const char *p, *q, *options = NULL;
1017 int l;
1018 const struct earlycon_id *match;
1019 const void *fdt = initial_boot_params;
1020 int ret;
1021
1022 offset = fdt_path_offset(fdt, "/chosen");
1023 if (offset < 0)
1024 offset = fdt_path_offset(fdt, "/chosen@0");
1025 if (offset < 0)
1026 return -ENOENT;
1027
1028 p = fdt_getprop(fdt, offset, "stdout-path", &l);
1029 if (!p)
1030 p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
1031 if (!p || !l)
1032 return -ENOENT;
1033
1034 q = strchrnul(p, ':');
1035 if (*q != '\0')
1036 options = q + 1;
1037 l = q - p;
1038
1039 /* Get the node specified by stdout-path */
1040 offset = fdt_path_offset_namelen(fdt, p, l);
1041 if (offset < 0) {
1042 pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
1043 return 0;
1044 }
1045
1046 for (match = __earlycon_table; match < __earlycon_table_end; match++) {
1047 if (!match->compatible[0])
1048 continue;
1049
1050 if (fdt_node_check_compatible(fdt, offset, match->compatible))
1051 continue;
1052
1053 ret = of_setup_earlycon(match, offset, options);
1054 if (!ret || ret == -EALREADY)
1055 return 0;
1056 }
1057 return -ENODEV;
1058 }
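/*
 * Example (illustrative): stdout-path = "serial0:115200n8" is split at the
 * colon, so the node is looked up through the "serial0" alias and the
 * remainder "115200n8" is passed to of_setup_earlycon() as the options
 * string.
 */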
1059 #endif
1060
1061 /*
1062 * early_init_dt_scan_root - fetch the top level address and size cells
1063 */
1064 int __init early_init_dt_scan_root(void)
1065 {
1066 const __be32 *prop;
1067 const void *fdt = initial_boot_params;
1068 int node = fdt_path_offset(fdt, "/");
1069
1070 if (node < 0)
1071 return -ENODEV;
1072
1073 dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
1074 dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
1075
1076 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1077 if (prop)
1078 dt_root_size_cells = be32_to_cpup(prop);
1079 pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
1080
1081 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1082 if (prop)
1083 dt_root_addr_cells = be32_to_cpup(prop);
1084 pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1085
1086 return 0;
1087 }
1088
1089 u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
1090 {
1091 const __be32 *p = *cellp;
1092
1093 *cellp = p + s;
1094 return of_read_number(p, s);
1095 }
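/*
 * Example (sketch): with s = 2 and *cellp pointing at the two cells
 * <0x00000001 0x80000000>, dt_mem_next_cell() returns 0x180000000 and
 * advances *cellp past the two cells it consumed.
 */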
1096
1097 /*
1098 * early_init_dt_scan_memory - Look for and parse memory nodes
1099 */
1100 int __init early_init_dt_scan_memory(void)
1101 {
1102 int node;
1103 const void *fdt = initial_boot_params;
1104
1105 fdt_for_each_subnode(node, fdt, 0) {
1106 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1107 const __be32 *reg, *endp;
1108 int l;
1109 bool hotpluggable;
1110
1111 /* We are scanning "memory" nodes only */
1112 if (type == NULL || strcmp(type, "memory") != 0)
1113 continue;
1114
1115 if (!of_fdt_device_is_available(fdt, node))
1116 continue;
1117
1118 reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1119 if (reg == NULL)
1120 reg = of_get_flat_dt_prop(node, "reg", &l);
1121 if (reg == NULL)
1122 continue;
1123
1124 endp = reg + (l / sizeof(__be32));
1125 hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
1126
1127 pr_debug("memory scan node %s, reg size %d,\n",
1128 fdt_get_name(fdt, node, NULL), l);
1129
1130 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1131 u64 base, size;
1132
1133 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1134 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1135
1136 if (size == 0)
1137 continue;
1138 pr_debug(" - %llx, %llx\n", base, size);
1139
1140 early_init_dt_add_memory_arch(base, size);
1141
1142 if (!hotpluggable)
1143 continue;
1144
1145 if (memblock_mark_hotplug(base, size))
1146 pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
1147 base, base + size);
1148 }
1149 }
1150 return 0;
1151 }
1152
1153 int __init early_init_dt_scan_chosen(char *cmdline)
1154 {
1155 int l, node;
1156 const char *p;
1157 const void *rng_seed;
1158 const void *fdt = initial_boot_params;
1159
1160 node = fdt_path_offset(fdt, "/chosen");
1161 if (node < 0)
1162 node = fdt_path_offset(fdt, "/chosen@0");
1163 if (node < 0)
1164 return -ENOENT;
1165
1166 chosen_node_offset = node;
1167
1168 early_init_dt_check_for_initrd(node);
1169 early_init_dt_check_for_elfcorehdr(node);
1170
1171 /* Retrieve command line */
1172 p = of_get_flat_dt_prop(node, "bootargs", &l);
1173 if (p != NULL && l > 0)
1174 strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
1175
1176 /*
1177 * CONFIG_CMDLINE is meant to be a default in case nothing else
1178 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
1179 * is set in which case we override whatever was found earlier.
1180 */
1181 #ifdef CONFIG_CMDLINE
1182 #if defined(CONFIG_CMDLINE_EXTEND)
1183 strlcat(cmdline, " ", COMMAND_LINE_SIZE);
1184 strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1185 #elif defined(CONFIG_CMDLINE_FORCE)
1186 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1187 #else
1188 /* No arguments from boot loader, use kernel's cmdline */
1189 if (!((char *)cmdline)[0])
1190 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1191 #endif
1192 #endif /* CONFIG_CMDLINE */
1193
1194 pr_debug("Command line is: %s\n", (char *)cmdline);
1195
1196 rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
1197 if (rng_seed && l > 0) {
1198 add_bootloader_randomness(rng_seed, l);
1199
1200 /* try to clear seed so it won't be found. */
1201 fdt_nop_property(initial_boot_params, node, "rng-seed");
1202
1203 /* update CRC check value */
1204 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1205 fdt_totalsize(initial_boot_params));
1206 }
1207
1208 return 0;
1209 }
1210
1211 #ifndef MIN_MEMBLOCK_ADDR
1212 #define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
1213 #endif
1214 #ifndef MAX_MEMBLOCK_ADDR
1215 #define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
1216 #endif
1217
1218 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1219 {
1220 const u64 phys_offset = MIN_MEMBLOCK_ADDR;
1221
1222 if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1223 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1224 base, base + size);
1225 return;
1226 }
1227
1228 if (!PAGE_ALIGNED(base)) {
1229 size -= PAGE_SIZE - (base & ~PAGE_MASK);
1230 base = PAGE_ALIGN(base);
1231 }
1232 size &= PAGE_MASK;
1233
1234 if (base > MAX_MEMBLOCK_ADDR) {
1235 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1236 base, base + size);
1237 return;
1238 }
1239
1240 if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1241 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1242 ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1243 size = MAX_MEMBLOCK_ADDR - base + 1;
1244 }
1245
1246 if (base + size < phys_offset) {
1247 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1248 base, base + size);
1249 return;
1250 }
1251 if (base < phys_offset) {
1252 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1253 base, phys_offset);
1254 size -= phys_offset - base;
1255 base = phys_offset;
1256 }
1257 memblock_add(base, size);
1258 }
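/*
 * Worked example of the clipping above (numbers assumed, PAGE_SIZE = 4K):
 * base = 0x80000800, size = 0x2000 becomes base = 0x80001000, size = 0x1000
 * after alignment, so only the whole page contained in the original range
 * is added to memblock.
 */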
1259
1260 static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1261 {
1262 void *ptr = memblock_alloc(size, align);
1263
1264 if (!ptr)
1265 panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
1266 __func__, size, align);
1267
1268 return ptr;
1269 }
1270
1271 bool __init early_init_dt_verify(void *params)
1272 {
1273 if (!params)
1274 return false;
1275
1276 /* check device tree validity */
1277 if (fdt_check_header(params))
1278 return false;
1279
1280 /* Setup flat device-tree pointer */
1281 initial_boot_params = params;
1282 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1283 fdt_totalsize(initial_boot_params));
1284 return true;
1285 }
1286
1287
1288 void __init early_init_dt_scan_nodes(void)
1289 {
1290 int rc;
1291
1292 /* Initialize {size,address}-cells info */
1293 early_init_dt_scan_root();
1294
1295 /* Retrieve various information from the /chosen node */
1296 rc = early_init_dt_scan_chosen(boot_command_line);
1297 if (rc)
1298 pr_warn("No chosen node found, continuing without\n");
1299
1300 /* Setup memory, calling early_init_dt_add_memory_arch */
1301 early_init_dt_scan_memory();
1302
1303 /* Handle linux,usable-memory-range property */
1304 early_init_dt_check_for_usable_mem_range();
1305 }
1306
1307 bool __init early_init_dt_scan(void *params)
1308 {
1309 bool status;
1310
1311 status = early_init_dt_verify(params);
1312 if (!status)
1313 return false;
1314
1315 early_init_dt_scan_nodes();
1316 return true;
1317 }
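/*
 * Typical boot-time flow (a sketch of how arch setup code commonly uses
 * these helpers; the exact sequence varies by architecture and "dt_virt"
 * is a hypothetical virtual address of the blob):
 *
 *	if (!early_init_dt_scan(dt_virt))
 *		panic("invalid device tree blob");
 *	early_init_fdt_scan_reserved_mem();
 *	...
 *	unflatten_device_tree();
 */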
1318
1319 /**
1320 * unflatten_device_tree - create tree of device_nodes from flat blob
1321 *
1322 * unflattens the device-tree passed by the firmware, creating the
1323 * tree of struct device_node. It also fills the "name" and "type"
1324 * pointers of the nodes so the normal device-tree walking functions
1325 * can be used.
1326 */
1327 void __init unflatten_device_tree(void)
1328 {
1329 __unflatten_device_tree(initial_boot_params, NULL, &of_root,
1330 early_init_dt_alloc_memory_arch, false);
1331
1332 /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
1333 of_alias_scan(early_init_dt_alloc_memory_arch);
1334
1335 unittest_unflatten_overlay_base();
1336 }
1337
1338 /**
1339 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
1340 *
1341 * Copies and unflattens the device-tree passed by the firmware, creating the
1342 * tree of struct device_node. It also fills the "name" and "type"
1343 * pointers of the nodes so the normal device-tree walking functions
1344 * can be used. This should only be used when the FDT memory has not been
1345 * reserved such is the case when the FDT is built-in to the kernel init
1346 * section. If the FDT memory is reserved already then unflatten_device_tree
1347 * should be used instead.
1348 */
1349 void __init unflatten_and_copy_device_tree(void)
1350 {
1351 int size;
1352 void *dt;
1353
1354 if (!initial_boot_params) {
1355 pr_warn("No valid device tree found, continuing without\n");
1356 return;
1357 }
1358
1359 size = fdt_totalsize(initial_boot_params);
1360 dt = early_init_dt_alloc_memory_arch(size,
1361 roundup_pow_of_two(FDT_V17_SIZE));
1362
1363 if (dt) {
1364 memcpy(dt, initial_boot_params, size);
1365 initial_boot_params = dt;
1366 }
1367 unflatten_device_tree();
1368 }
1369
1370 #ifdef CONFIG_SYSFS
1371 static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
1372 struct bin_attribute *bin_attr,
1373 char *buf, loff_t off, size_t count)
1374 {
1375 memcpy(buf, initial_boot_params + off, count);
1376 return count;
1377 }
1378
1379 static int __init of_fdt_raw_init(void)
1380 {
1381 static struct bin_attribute of_fdt_raw_attr =
1382 __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
1383
1384 if (!initial_boot_params)
1385 return 0;
1386
1387 if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
1388 fdt_totalsize(initial_boot_params))) {
1389 pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
1390 return 0;
1391 }
1392 of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
1393 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
1394 }
1395 late_initcall(of_fdt_raw_init);
1396 #endif
1397
1398 #endif /* CONFIG_OF_EARLY_FLATTREE */
1399