1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Functions for working with the Flattened Device Tree data format
4 *
5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
6 * benh@kernel.crashing.org
7 */
8
9 #define pr_fmt(fmt) "OF: fdt: " fmt
10
11 #include <linux/acpi.h>
12 #include <linux/crash_dump.h>
13 #include <linux/crc32.h>
14 #include <linux/kernel.h>
15 #include <linux/initrd.h>
16 #include <linux/memblock.h>
17 #include <linux/mutex.h>
18 #include <linux/of.h>
19 #include <linux/of_fdt.h>
20 #include <linux/sizes.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/slab.h>
24 #include <linux/libfdt.h>
25 #include <linux/debugfs.h>
26 #include <linux/serial_core.h>
27 #include <linux/sysfs.h>
28 #include <linux/random.h>
29
30 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
31 #include <asm/page.h>
32
33 #include "of_private.h"
34
35 /*
36 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] are magically created by
37 * cmd_wrap_S_dtb in scripts/Makefile.dtbs
38 */
39 extern uint8_t __dtb_empty_root_begin[];
40 extern uint8_t __dtb_empty_root_end[];
41
42 /*
43 * of_fdt_limit_memory - limit the number of regions in the /memory node
44 * @limit: maximum entries
45 *
46 * Adjust the flattened device tree to have at most 'limit' number of
47 * memory entries in the /memory node. This function may be called
48 * any time after initial_boot_params is set.
49 */
50 void __init of_fdt_limit_memory(int limit)
51 {
52 int memory;
53 int len;
54 const void *val;
55 int cell_size = sizeof(uint32_t)*(dt_root_addr_cells + dt_root_size_cells);
56
57 memory = fdt_path_offset(initial_boot_params, "/memory");
58 if (memory > 0) {
59 val = fdt_getprop(initial_boot_params, memory, "reg", &len);
60 if (len > limit*cell_size) {
61 len = limit*cell_size;
62 pr_debug("Limiting number of entries to %d\n", limit);
63 fdt_setprop(initial_boot_params, memory, "reg", val,
64 len);
65 }
66 }
67 }
68
69 bool of_fdt_device_is_available(const void *blob, unsigned long node)
70 {
71 const char *status = fdt_getprop(blob, node, "status", NULL);
72
73 if (!status)
74 return true;
75
76 if (!strcmp(status, "ok") || !strcmp(status, "okay"))
77 return true;
78
79 return false;
80 }
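/*
 * Illustrative sketch (editor addition, not part of the upstream file and kept
 * out of the build): how a flat-tree walker might combine libfdt's
 * fdt_for_each_subnode() with of_fdt_device_is_available() above to skip
 * disabled nodes. The helper name count_available_children() is hypothetical.
 */
#if 0
static int __init count_available_children(const void *blob, int parent)
{
	int node, n = 0;

	fdt_for_each_subnode(node, blob, parent) {
		/* Nodes whose status is anything but "ok"/"okay" are skipped */
		if (!of_fdt_device_is_available(blob, node))
			continue;
		n++;
	}
	return n;
}
#endif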
81
82 static void *unflatten_dt_alloc(void **mem, unsigned long size,
83 unsigned long align)
84 {
85 void *res;
86
87 *mem = PTR_ALIGN(*mem, align);
88 res = *mem;
89 *mem += size;
90
91 return res;
92 }
93
94 static void populate_properties(const void *blob,
95 int offset,
96 void **mem,
97 struct device_node *np,
98 const char *nodename,
99 bool dryrun)
100 {
101 struct property *pp, **pprev = NULL;
102 int cur;
103 bool has_name = false;
104
105 pprev = &np->properties;
106 for (cur = fdt_first_property_offset(blob, offset);
107 cur >= 0;
108 cur = fdt_next_property_offset(blob, cur)) {
109 const __be32 *val;
110 const char *pname;
111 u32 sz;
112
113 val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
114 if (!val) {
115 pr_warn("Cannot locate property at 0x%x\n", cur);
116 continue;
117 }
118
119 if (!pname) {
120 pr_warn("Cannot find property name at 0x%x\n", cur);
121 continue;
122 }
123
124 if (!strcmp(pname, "name"))
125 has_name = true;
126
127 pp = unflatten_dt_alloc(mem, sizeof(struct property),
128 __alignof__(struct property));
129 if (dryrun)
130 continue;
131
132 /* We accept flattened tree phandles either in
133 * ePAPR-style "phandle" properties, or the
134 * legacy "linux,phandle" properties. If both
135 * appear and have different values, things
136 * will get weird. Don't do that.
137 */
138 if (!strcmp(pname, "phandle") ||
139 !strcmp(pname, "linux,phandle")) {
140 if (!np->phandle)
141 np->phandle = be32_to_cpup(val);
142 }
143
144 /* And we process the "ibm,phandle" property
145 * used in pSeries dynamic device tree
146 * stuff
147 */
148 if (!strcmp(pname, "ibm,phandle"))
149 np->phandle = be32_to_cpup(val);
150
151 pp->name = (char *)pname;
152 pp->length = sz;
153 pp->value = (__be32 *)val;
154 *pprev = pp;
155 pprev = &pp->next;
156 }
157
158 /* With version 0x10 we may not have the name property,
159 * recreate it here from the unit name if absent
160 */
161 if (!has_name) {
162 const char *p = nodename, *ps = p, *pa = NULL;
163 int len;
164
165 while (*p) {
166 if ((*p) == '@')
167 pa = p;
168 else if ((*p) == '/')
169 ps = p + 1;
170 p++;
171 }
172
173 if (pa < ps)
174 pa = p;
175 len = (pa - ps) + 1;
176 pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
177 __alignof__(struct property));
178 if (!dryrun) {
179 pp->name = "name";
180 pp->length = len;
181 pp->value = pp + 1;
182 *pprev = pp;
183 memcpy(pp->value, ps, len - 1);
184 ((char *)pp->value)[len - 1] = 0;
185 pr_debug("fixed up name for %s -> %s\n",
186 nodename, (char *)pp->value);
187 }
188 }
189 }
190
191 static int populate_node(const void *blob,
192 int offset,
193 void **mem,
194 struct device_node *dad,
195 struct device_node **pnp,
196 bool dryrun)
197 {
198 struct device_node *np;
199 const char *pathp;
200 int len;
201
202 pathp = fdt_get_name(blob, offset, &len);
203 if (!pathp) {
204 *pnp = NULL;
205 return len;
206 }
207
208 len++;
209
210 np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
211 __alignof__(struct device_node));
212 if (!dryrun) {
213 char *fn;
214 of_node_init(np);
215 np->full_name = fn = ((char *)np) + sizeof(*np);
216
217 memcpy(fn, pathp, len);
218
219 if (dad != NULL) {
220 np->parent = dad;
221 np->sibling = dad->child;
222 dad->child = np;
223 }
224 }
225
226 populate_properties(blob, offset, mem, np, pathp, dryrun);
227 if (!dryrun) {
228 np->name = of_get_property(np, "name", NULL);
229 if (!np->name)
230 np->name = "<NULL>";
231 }
232
233 *pnp = np;
234 return 0;
235 }
236
237 static void reverse_nodes(struct device_node *parent)
238 {
239 struct device_node *child, *next;
240
241 /* Depth-first: recurse into the children first */
242 child = parent->child;
243 while (child) {
244 reverse_nodes(child);
245
246 child = child->sibling;
247 }
248
249 /* Reverse the nodes in the child list */
250 child = parent->child;
251 parent->child = NULL;
252 while (child) {
253 next = child->sibling;
254
255 child->sibling = parent->child;
256 parent->child = child;
257 child = next;
258 }
259 }
260
261 /**
262 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
263 * @blob: The parent device tree blob
264 * @mem: Memory chunk to use for allocating device nodes and properties
265 * @dad: Parent struct device_node
266 * @nodepp: The device_node tree created by the call
267 *
268 * Return: The size of unflattened device tree or error code
269 */
270 static int unflatten_dt_nodes(const void *blob,
271 void *mem,
272 struct device_node *dad,
273 struct device_node **nodepp)
274 {
275 struct device_node *root;
276 int offset = 0, depth = 0, initial_depth = 0;
277 #define FDT_MAX_DEPTH 64
278 struct device_node *nps[FDT_MAX_DEPTH];
279 void *base = mem;
280 bool dryrun = !base;
281 int ret;
282
283 if (nodepp)
284 *nodepp = NULL;
285
286 /*
287 * We're unflattening a device sub-tree if @dad is valid. There may be
288 * multiple nodes in the first level of depth. We need to set @depth
289 * to 1 to make fdt_next_node() happy, as it bails out immediately
290 * when a negative @depth is found. Otherwise, the device nodes other
291 * than the first one would not be unflattened successfully.
292 */
293 if (dad)
294 depth = initial_depth = 1;
295
296 root = dad;
297 nps[depth] = dad;
298
299 for (offset = 0;
300 offset >= 0 && depth >= initial_depth;
301 offset = fdt_next_node(blob, offset, &depth)) {
302 if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
303 continue;
304
305 if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
306 !of_fdt_device_is_available(blob, offset))
307 continue;
308
309 ret = populate_node(blob, offset, &mem, nps[depth],
310 &nps[depth+1], dryrun);
311 if (ret < 0)
312 return ret;
313
314 if (!dryrun && nodepp && !*nodepp)
315 *nodepp = nps[depth+1];
316 if (!dryrun && !root)
317 root = nps[depth+1];
318 }
319
320 if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
321 pr_err("Error %d processing FDT\n", offset);
322 return -EINVAL;
323 }
324
325 /*
326 * Reverse the child list. Some drivers assume that node order matches
327 * the .dts node order.
328 */
329 if (!dryrun)
330 reverse_nodes(root);
331
332 return mem - base;
333 }
334
335 /**
336 * __unflatten_device_tree - create tree of device_nodes from flat blob
337 * @blob: The blob to expand
338 * @dad: Parent device node
339 * @mynodes: The device_node tree created by the call
340 * @dt_alloc: An allocator that provides a virtual address to memory
341 * for the resulting tree
342 * @detached: if true set OF_DETACHED on @mynodes
343 *
344 * unflattens a device-tree, creating the tree of struct device_node. It also
345 * fills the "name" and "type" pointers of the nodes so the normal device-tree
346 * walking functions can be used.
347 *
348 * Return: NULL on failure or the memory chunk containing the unflattened
349 * device tree on success.
350 */
351 void *__unflatten_device_tree(const void *blob,
352 struct device_node *dad,
353 struct device_node **mynodes,
354 void *(*dt_alloc)(u64 size, u64 align),
355 bool detached)
356 {
357 int size;
358 void *mem;
359 int ret;
360
361 if (mynodes)
362 *mynodes = NULL;
363
364 pr_debug(" -> unflatten_device_tree()\n");
365
366 if (!blob) {
367 pr_debug("No device tree pointer\n");
368 return NULL;
369 }
370
371 pr_debug("Unflattening device tree:\n");
372 pr_debug("magic: %08x\n", fdt_magic(blob));
373 pr_debug("size: %08x\n", fdt_totalsize(blob));
374 pr_debug("version: %08x\n", fdt_version(blob));
375
376 if (fdt_check_header(blob)) {
377 pr_err("Invalid device tree blob header\n");
378 return NULL;
379 }
380
381 /* First pass, scan for size */
382 size = unflatten_dt_nodes(blob, NULL, dad, NULL);
383 if (size <= 0)
384 return NULL;
385
386 size = ALIGN(size, 4);
387 pr_debug(" size is %d, allocating...\n", size);
388
389 /* Allocate memory for the expanded device tree */
390 mem = dt_alloc(size + 4, __alignof__(struct device_node));
391 if (!mem)
392 return NULL;
393
394 memset(mem, 0, size);
395
396 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
397
398 pr_debug(" unflattening %p...\n", mem);
399
400 /* Second pass, do actual unflattening */
401 ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
402
403 if (be32_to_cpup(mem + size) != 0xdeadbeef)
404 pr_warn("End of tree marker overwritten: %08x\n",
405 be32_to_cpup(mem + size));
406
407 if (ret <= 0)
408 return NULL;
409
410 if (detached && mynodes && *mynodes) {
411 of_node_set_flag(*mynodes, OF_DETACHED);
412 pr_debug("unflattened tree is detached\n");
413 }
414
415 pr_debug(" <- unflatten_device_tree()\n");
416 return mem;
417 }
418
419 static void *kernel_tree_alloc(u64 size, u64 align)
420 {
421 return kzalloc(size, GFP_KERNEL);
422 }
423
424 static DEFINE_MUTEX(of_fdt_unflatten_mutex);
425
426 /**
427 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
428 * @blob: Flat device tree blob
429 * @dad: Parent device node
430 * @mynodes: The device tree created by the call
431 *
432 * unflattens the device-tree passed by the firmware, creating the
433 * tree of struct device_node. It also fills the "name" and "type"
434 * pointers of the nodes so the normal device-tree walking functions
435 * can be used.
436 *
437 * Return: NULL on failure or the memory chunk containing the unflattened
438 * device tree on success.
439 */
440 void *of_fdt_unflatten_tree(const unsigned long *blob,
441 struct device_node *dad,
442 struct device_node **mynodes)
443 {
444 void *mem;
445
446 mutex_lock(&of_fdt_unflatten_mutex);
447 mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
448 true);
449 mutex_unlock(&of_fdt_unflatten_mutex);
450
451 return mem;
452 }
453 EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
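/*
 * Illustrative sketch (editor addition, not part of the upstream file and kept
 * out of the build): unflattening a standalone FDT blob into a detached node
 * tree with of_fdt_unflatten_tree(). The blob pointer "my_fdt_blob" and the
 * helper name are hypothetical; the unittest overlay-base code uses this
 * helper in a similar way.
 */
#if 0
static void example_unflatten(const unsigned long *my_fdt_blob)
{
	struct device_node *root = NULL;
	void *mem;

	mem = of_fdt_unflatten_tree(my_fdt_blob, NULL, &root);
	if (!mem || !root)
		return;

	/* The resulting tree is marked OF_DETACHED and is not linked into of_root */
	pr_info("unflattened root: %pOF\n", root);
}
#endif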
454
455 /* Everything below here references initial_boot_params directly. */
456 int __initdata dt_root_addr_cells;
457 int __initdata dt_root_size_cells;
458
459 void *initial_boot_params __ro_after_init;
460
461 #ifdef CONFIG_OF_EARLY_FLATTREE
462
463 static u32 of_fdt_crc32;
464
465 /*
466 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
467 *
468 * This function reserves the memory occupied by an elf core header
469 * described in the device tree. This region contains all the
470 * information about the primary kernel's core image and is used by a
471 * dump-capture kernel to access the system memory of the primary kernel.
472 */
473 static void __init fdt_reserve_elfcorehdr(void)
474 {
475 if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
476 return;
477
478 if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
479 pr_warn("elfcorehdr is overlapped\n");
480 return;
481 }
482
483 memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
484
485 pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
486 elfcorehdr_size >> 10, elfcorehdr_addr);
487 }
488
489 /**
490 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
491 *
492 * This function grabs memory from the early allocator for device-exclusive
493 * use as defined in the device tree. It should be called by arch-specific code
494 * once the early allocator (i.e. memblock) has been fully activated.
495 */
496 void __init early_init_fdt_scan_reserved_mem(void)
497 {
498 int n;
499 u64 base, size;
500
501 if (!initial_boot_params)
502 return;
503
504 fdt_scan_reserved_mem();
505 fdt_reserve_elfcorehdr();
506
507 /* Process header /memreserve/ fields */
508 for (n = 0; ; n++) {
509 fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
510 if (!size)
511 break;
512 memblock_reserve(base, size);
513 }
514
515 fdt_init_reserved_mem();
516 }
517
518 /**
519 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
520 */
521 void __init early_init_fdt_reserve_self(void)
522 {
523 if (!initial_boot_params)
524 return;
525
526 /* Reserve the dtb region */
527 memblock_reserve(__pa(initial_boot_params),
528 fdt_totalsize(initial_boot_params));
529 }
530
531 /**
532 * of_scan_flat_dt - scan flattened tree blob and call callback on each.
533 * @it: callback function
534 * @data: context data pointer
535 *
536 * This function is used to scan the flattened device tree; it is
537 * typically used to extract memory information at boot, before the
538 * tree can be unflattened.
539 */
540 int __init of_scan_flat_dt(int (*it)(unsigned long node,
541 const char *uname, int depth,
542 void *data),
543 void *data)
544 {
545 const void *blob = initial_boot_params;
546 const char *pathp;
547 int offset, rc = 0, depth = -1;
548
549 if (!blob)
550 return 0;
551
552 for (offset = fdt_next_node(blob, -1, &depth);
553 offset >= 0 && depth >= 0 && !rc;
554 offset = fdt_next_node(blob, offset, &depth)) {
555
556 pathp = fdt_get_name(blob, offset, NULL);
557 rc = it(offset, pathp, depth, data);
558 }
559 return rc;
560 }
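/*
 * Illustrative sketch (editor addition, not part of the upstream file and kept
 * out of the build): the usual pattern for an of_scan_flat_dt() callback. The
 * callback is invoked for every node; returning non-zero stops the scan. The
 * callback name "example_scan_chosen" and the property "example,prop" are
 * hypothetical.
 */
#if 0
static int __init example_scan_chosen(unsigned long node, const char *uname,
				      int depth, void *data)
{
	const char *prop;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;		/* not /chosen, keep scanning */

	prop = of_get_flat_dt_prop(node, "example,prop", NULL);
	if (prop)
		*(const char **)data = prop;

	return 1;			/* found it, stop the scan */
}

/* ... then, from early arch code: of_scan_flat_dt(example_scan_chosen, &result); */
#endif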
561
562 /**
563 * of_scan_flat_dt_subnodes - scan sub-nodes of a node call callback on each.
564 * @parent: parent node
565 * @it: callback function
566 * @data: context data pointer
567 *
568 * This function is used to scan sub-nodes of a node.
569 */
570 int __init of_scan_flat_dt_subnodes(unsigned long parent,
571 int (*it)(unsigned long node,
572 const char *uname,
573 void *data),
574 void *data)
575 {
576 const void *blob = initial_boot_params;
577 int node;
578
579 fdt_for_each_subnode(node, blob, parent) {
580 const char *pathp;
581 int rc;
582
583 pathp = fdt_get_name(blob, node, NULL);
584 rc = it(node, pathp, data);
585 if (rc)
586 return rc;
587 }
588 return 0;
589 }
590
591 /**
592 * of_get_flat_dt_subnode_by_name - get the subnode by given name
593 *
594 * @node: the parent node
595 * @uname: the name of subnode
596 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
597 */
598
599 int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
600 {
601 return fdt_subnode_offset(initial_boot_params, node, uname);
602 }
603
604 /*
605 * of_get_flat_dt_root - find the root node in the flat blob
606 */
607 unsigned long __init of_get_flat_dt_root(void)
608 {
609 return 0;
610 }
611
612 /*
613 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
614 *
615 * This function can be used within an of_scan_flat_dt() callback to get
616 * access to properties.
617 */
618 const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
619 int *size)
620 {
621 return fdt_getprop(initial_boot_params, node, name, size);
622 }
623
624 /**
625 * of_fdt_is_compatible - Return true if given node from the given blob has
626 * compat in its compatible list
627 * @blob: A device tree blob
628 * @node: node to test
629 * @compat: compatible string to compare with compatible list.
630 *
631 * Return: a non-zero value on match with smaller values returned for more
632 * specific compatible values.
633 */
634 static int of_fdt_is_compatible(const void *blob,
635 unsigned long node, const char *compat)
636 {
637 const char *cp;
638 int cplen;
639 unsigned long l, score = 0;
640
641 cp = fdt_getprop(blob, node, "compatible", &cplen);
642 if (cp == NULL)
643 return 0;
644 while (cplen > 0) {
645 score++;
646 if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
647 return score;
648 l = strlen(cp) + 1;
649 cp += l;
650 cplen -= l;
651 }
652
653 return 0;
654 }
655
656 /**
657 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
658 * @node: node to test
659 * @compat: compatible string to compare with compatible list.
660 */
661 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
662 {
663 return of_fdt_is_compatible(initial_boot_params, node, compat);
664 }
665
666 /*
667 * of_flat_dt_match - Return true if node matches a list of compatible values
668 */
669 static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
670 {
671 unsigned int tmp, score = 0;
672
673 if (!compat)
674 return 0;
675
676 while (*compat) {
677 tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
678 if (tmp && (score == 0 || (tmp < score)))
679 score = tmp;
680 compat++;
681 }
682
683 return score;
684 }
685
686 /*
687 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
688 */
689 uint32_t __init of_get_flat_dt_phandle(unsigned long node)
690 {
691 return fdt_get_phandle(initial_boot_params, node);
692 }
693
694 const char * __init of_flat_dt_get_machine_name(void)
695 {
696 const char *name;
697 unsigned long dt_root = of_get_flat_dt_root();
698
699 name = of_get_flat_dt_prop(dt_root, "model", NULL);
700 if (!name)
701 name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
702 return name;
703 }
704
705 /**
706 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
707 *
708 * @default_match: A machine specific ptr to return in case of no match.
709 * @get_next_compat: callback function to return next compatible match table.
710 *
711 * Iterate through machine match tables to find the best match for the machine
712 * compatible string in the FDT.
713 */
714 const void * __init of_flat_dt_match_machine(const void *default_match,
715 const void * (*get_next_compat)(const char * const**))
716 {
717 const void *data = NULL;
718 const void *best_data = default_match;
719 const char *const *compat;
720 unsigned long dt_root;
721 unsigned int best_score = ~1, score = 0;
722
723 dt_root = of_get_flat_dt_root();
724 while ((data = get_next_compat(&compat))) {
725 score = of_flat_dt_match(dt_root, compat);
726 if (score > 0 && score < best_score) {
727 best_data = data;
728 best_score = score;
729 }
730 }
731 if (!best_data) {
732 const char *prop;
733 int size;
734
735 pr_err("\n unrecognized device tree list:\n[ ");
736
737 prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
738 if (prop) {
739 while (size > 0) {
740 printk("'%s' ", prop);
741 size -= strlen(prop) + 1;
742 prop += strlen(prop) + 1;
743 }
744 }
745 printk("]\n\n");
746 return NULL;
747 }
748
749 pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
750
751 return best_data;
752 }
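/*
 * Illustrative sketch (editor addition, not part of the upstream file and kept
 * out of the build): how arch setup code typically drives
 * of_flat_dt_match_machine() with a get_next_compat callback that walks a
 * machine-descriptor table. The struct example_machine_desc type and the
 * table symbols are hypothetical; arch/arm does something very similar with
 * its struct machine_desc table.
 */
#if 0
struct example_machine_desc {
	const char *const *dt_compat;	/* NULL-terminated compatible list */
};

extern const struct example_machine_desc __example_mdesc_start[], __example_mdesc_end[];

static const void * __init example_next_compat(const char *const **match)
{
	static const struct example_machine_desc *mdesc = __example_mdesc_start;

	if (mdesc >= __example_mdesc_end)
		return NULL;

	*match = mdesc->dt_compat;
	return mdesc++;		/* hand back this entry, advance for next call */
}

/* const struct example_machine_desc *md =
 *	of_flat_dt_match_machine(NULL, example_next_compat); */
#endif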
753
754 static void __early_init_dt_declare_initrd(unsigned long start,
755 unsigned long end)
756 {
757 /*
758 * __va() is not yet available this early on some platforms. In that
759 * case, the platform uses phys_initrd_start/phys_initrd_size instead
760 * and does the VA conversion itself.
761 */
762 if (!IS_ENABLED(CONFIG_ARM64) &&
763 !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
764 initrd_start = (unsigned long)__va(start);
765 initrd_end = (unsigned long)__va(end);
766 initrd_below_start_ok = 1;
767 }
768 }
769
770 /**
771 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
772 * @node: reference to node containing initrd location ('chosen')
773 */
774 static void __init early_init_dt_check_for_initrd(unsigned long node)
775 {
776 u64 start, end;
777 int len;
778 const __be32 *prop;
779
780 if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
781 return;
782
783 pr_debug("Looking for initrd properties... ");
784
785 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
786 if (!prop)
787 return;
788 start = of_read_number(prop, len/4);
789
790 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
791 if (!prop)
792 return;
793 end = of_read_number(prop, len/4);
794 if (start > end)
795 return;
796
797 __early_init_dt_declare_initrd(start, end);
798 phys_initrd_start = start;
799 phys_initrd_size = end - start;
800
801 pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
802 }
803
804 /**
805 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
806 * tree
807 * @node: reference to node containing elfcorehdr location ('chosen')
808 */
809 static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
810 {
811 const __be32 *prop;
812 int len;
813
814 if (!IS_ENABLED(CONFIG_CRASH_DUMP))
815 return;
816
817 pr_debug("Looking for elfcorehdr property... ");
818
819 prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
820 if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
821 return;
822
823 elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
824 elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
825
826 pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
827 elfcorehdr_addr, elfcorehdr_size);
828 }
829
830 static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
831
832 /*
833 * The main usage of linux,usable-memory-range is for the crash-dump kernel.
834 * Originally there was only one usable-memory region; now there may be
835 * two regions, a low region and a high region.
836 * For compatibility with existing user space and older kdump, the low
837 * region is always the last range of linux,usable-memory-range, if it exists.
838 */
839 #define MAX_USABLE_RANGES 2
840
841 /**
842 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
843 * location from flat tree
844 */
845 void __init early_init_dt_check_for_usable_mem_range(void)
846 {
847 struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
848 const __be32 *prop, *endp;
849 int len, i;
850 unsigned long node = chosen_node_offset;
851
852 if ((long)node < 0)
853 return;
854
855 pr_debug("Looking for usable-memory-range property... ");
856
857 prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
858 if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
859 return;
860
861 endp = prop + (len / sizeof(__be32));
862 for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
863 rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
864 rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
865
866 pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
867 i, &rgn[i].base, &rgn[i].size);
868 }
869
870 memblock_cap_memory_range(rgn[0].base, rgn[0].size);
871 for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
872 memblock_add(rgn[i].base, rgn[i].size);
873 }
874
875 #ifdef CONFIG_SERIAL_EARLYCON
876
877 int __init early_init_dt_scan_chosen_stdout(void)
878 {
879 int offset;
880 const char *p, *q, *options = NULL;
881 int l;
882 const struct earlycon_id *match;
883 const void *fdt = initial_boot_params;
884 int ret;
885
886 offset = fdt_path_offset(fdt, "/chosen");
887 if (offset < 0)
888 offset = fdt_path_offset(fdt, "/chosen@0");
889 if (offset < 0)
890 return -ENOENT;
891
892 p = fdt_getprop(fdt, offset, "stdout-path", &l);
893 if (!p)
894 p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
895 if (!p || !l)
896 return -ENOENT;
897
898 q = strchrnul(p, ':');
899 if (*q != '\0')
900 options = q + 1;
901 l = q - p;
902
903 /* Get the node specified by stdout-path */
904 offset = fdt_path_offset_namelen(fdt, p, l);
905 if (offset < 0) {
906 pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
907 return 0;
908 }
909
910 for (match = __earlycon_table; match < __earlycon_table_end; match++) {
911 if (!match->compatible[0])
912 continue;
913
914 if (fdt_node_check_compatible(fdt, offset, match->compatible))
915 continue;
916
917 ret = of_setup_earlycon(match, offset, options);
918 if (!ret || ret == -EALREADY)
919 return 0;
920 }
921 return -ENODEV;
922 }
923 #endif
924
925 /*
926 * early_init_dt_scan_root - fetch the top level address and size cells
927 */
928 int __init early_init_dt_scan_root(void)
929 {
930 const __be32 *prop;
931 const void *fdt = initial_boot_params;
932 int node = fdt_path_offset(fdt, "/");
933
934 if (node < 0)
935 return -ENODEV;
936
937 dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
938 dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
939
940 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
941 if (prop)
942 dt_root_size_cells = be32_to_cpup(prop);
943 pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
944
945 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
946 if (prop)
947 dt_root_addr_cells = be32_to_cpup(prop);
948 pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
949
950 return 0;
951 }
952
953 u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
954 {
955 const __be32 *p = *cellp;
956
957 *cellp = p + s;
958 return of_read_number(p, s);
959 }
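/*
 * Illustrative sketch (editor addition, not part of the upstream file and kept
 * out of the build): decoding (base, size) pairs from a flat-tree "reg"
 * property with dt_mem_next_cell(), which reads dt_root_addr_cells or
 * dt_root_size_cells cells and advances the cell pointer. The helper name is
 * hypothetical; early_init_dt_scan_memory() below follows the same pattern.
 */
#if 0
static void __init example_walk_reg(const __be32 *reg, int len)
{
	const __be32 *endp = reg + (len / sizeof(__be32));

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		u64 size = dt_mem_next_cell(dt_root_size_cells, &reg);

		pr_debug("reg entry: base=0x%llx size=0x%llx\n", base, size);
	}
}
#endif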
960
961 /*
962 * early_init_dt_scan_memory - Look for and parse memory nodes
963 */
964 int __init early_init_dt_scan_memory(void)
965 {
966 int node, found_memory = 0;
967 const void *fdt = initial_boot_params;
968
969 fdt_for_each_subnode(node, fdt, 0) {
970 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
971 const __be32 *reg, *endp;
972 int l;
973 bool hotpluggable;
974
975 /* We are scanning "memory" nodes only */
976 if (type == NULL || strcmp(type, "memory") != 0)
977 continue;
978
979 if (!of_fdt_device_is_available(fdt, node))
980 continue;
981
982 reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
983 if (reg == NULL)
984 reg = of_get_flat_dt_prop(node, "reg", &l);
985 if (reg == NULL)
986 continue;
987
988 endp = reg + (l / sizeof(__be32));
989 hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
990
991 pr_debug("memory scan node %s, reg size %d,\n",
992 fdt_get_name(fdt, node, NULL), l);
993
994 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
995 u64 base, size;
996
997 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
998 size = dt_mem_next_cell(dt_root_size_cells, &reg);
999
1000 if (size == 0)
1001 continue;
1002 pr_debug(" - %llx, %llx\n", base, size);
1003
1004 early_init_dt_add_memory_arch(base, size);
1005
1006 found_memory = 1;
1007
1008 if (!hotpluggable)
1009 continue;
1010
1011 if (memblock_mark_hotplug(base, size))
1012 pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
1013 base, base + size);
1014 }
1015 }
1016 return found_memory;
1017 }
1018
1019 int __init early_init_dt_scan_chosen(char *cmdline)
1020 {
1021 int l, node;
1022 const char *p;
1023 const void *rng_seed;
1024 const void *fdt = initial_boot_params;
1025
1026 node = fdt_path_offset(fdt, "/chosen");
1027 if (node < 0)
1028 node = fdt_path_offset(fdt, "/chosen@0");
1029 if (node < 0)
1030 /* Handle the cmdline config options even if no /chosen node */
1031 goto handle_cmdline;
1032
1033 chosen_node_offset = node;
1034
1035 early_init_dt_check_for_initrd(node);
1036 early_init_dt_check_for_elfcorehdr(node);
1037
1038 rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
1039 if (rng_seed && l > 0) {
1040 add_bootloader_randomness(rng_seed, l);
1041
1042 /* try to clear seed so it won't be found. */
1043 fdt_nop_property(initial_boot_params, node, "rng-seed");
1044
1045 /* update CRC check value */
1046 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1047 fdt_totalsize(initial_boot_params));
1048 }
1049
1050 /* Retrieve command line */
1051 p = of_get_flat_dt_prop(node, "bootargs", &l);
1052 if (p != NULL && l > 0)
1053 strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
1054
1055 handle_cmdline:
1056 /*
1057 * CONFIG_CMDLINE is meant to be a default in case nothing else
1058 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
1059 * is set in which case we override whatever was found earlier.
1060 */
1061 #ifdef CONFIG_CMDLINE
1062 #if defined(CONFIG_CMDLINE_EXTEND)
1063 strlcat(cmdline, " ", COMMAND_LINE_SIZE);
1064 strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1065 #elif defined(CONFIG_CMDLINE_FORCE)
1066 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1067 #else
1068 /* No arguments from boot loader, use kernel's cmdline */
1069 if (!((char *)cmdline)[0])
1070 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1071 #endif
1072 #endif /* CONFIG_CMDLINE */
1073
1074 pr_debug("Command line is: %s\n", (char *)cmdline);
1075
1076 return 0;
1077 }
1078
1079 #ifndef MIN_MEMBLOCK_ADDR
1080 #define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
1081 #endif
1082 #ifndef MAX_MEMBLOCK_ADDR
1083 #define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
1084 #endif
1085
1086 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1087 {
1088 const u64 phys_offset = MIN_MEMBLOCK_ADDR;
1089
1090 if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1091 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1092 base, base + size);
1093 return;
1094 }
1095
1096 if (!PAGE_ALIGNED(base)) {
1097 size -= PAGE_SIZE - (base & ~PAGE_MASK);
1098 base = PAGE_ALIGN(base);
1099 }
1100 size &= PAGE_MASK;
1101
1102 if (base > MAX_MEMBLOCK_ADDR) {
1103 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1104 base, base + size);
1105 return;
1106 }
1107
1108 if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1109 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1110 ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1111 size = MAX_MEMBLOCK_ADDR - base + 1;
1112 }
1113
1114 if (base + size < phys_offset) {
1115 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1116 base, base + size);
1117 return;
1118 }
1119 if (base < phys_offset) {
1120 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1121 base, phys_offset);
1122 size -= phys_offset - base;
1123 base = phys_offset;
1124 }
1125 memblock_add(base, size);
1126 }
1127
1128 static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1129 {
1130 void *ptr = memblock_alloc(size, align);
1131
1132 if (!ptr)
1133 panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
1134 __func__, size, align);
1135
1136 return ptr;
1137 }
1138
1139 bool __init early_init_dt_verify(void *params)
1140 {
1141 if (!params)
1142 return false;
1143
1144 /* check device tree validity */
1145 if (fdt_check_header(params))
1146 return false;
1147
1148 /* Setup flat device-tree pointer */
1149 initial_boot_params = params;
1150 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1151 fdt_totalsize(initial_boot_params));
1152
1153 /* Initialize {size,address}-cells info */
1154 early_init_dt_scan_root();
1155
1156 return true;
1157 }
1158
1159
1160 void __init early_init_dt_scan_nodes(void)
1161 {
1162 int rc;
1163
1164 /* Retrieve various information from the /chosen node */
1165 rc = early_init_dt_scan_chosen(boot_command_line);
1166 if (rc)
1167 pr_warn("No chosen node found, continuing without\n");
1168
1169 /* Setup memory, calling early_init_dt_add_memory_arch */
1170 early_init_dt_scan_memory();
1171
1172 /* Handle linux,usable-memory-range property */
1173 early_init_dt_check_for_usable_mem_range();
1174 }
1175
1176 bool __init early_init_dt_scan(void *params)
1177 {
1178 bool status;
1179
1180 status = early_init_dt_verify(params);
1181 if (!status)
1182 return false;
1183
1184 early_init_dt_scan_nodes();
1185 return true;
1186 }
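/*
 * Illustrative sketch (editor addition, not part of the upstream file and kept
 * out of the build): the typical boot-time sequence an architecture follows
 * around these helpers. The function name and the dtb_virt argument are
 * hypothetical, and the exact ordering differs per architecture.
 */
#if 0
static void __init example_setup_arch_fdt(void *dtb_virt)
{
	if (!early_init_dt_scan(dtb_virt))	/* verify header, scan /chosen and memory */
		panic("No valid device tree blob found");

	early_init_fdt_reserve_self();		/* keep the blob itself out of the allocator */
	early_init_fdt_scan_reserved_mem();	/* /memreserve/ and /reserved-memory regions */

	/* later, once memblock allocations work: */
	unflatten_device_tree();
}
#endif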
1187
1188 static void *__init copy_device_tree(void *fdt)
1189 {
1190 int size;
1191 void *dt;
1192
1193 size = fdt_totalsize(fdt);
1194 dt = early_init_dt_alloc_memory_arch(size,
1195 roundup_pow_of_two(FDT_V17_SIZE));
1196
1197 if (dt)
1198 memcpy(dt, fdt, size);
1199
1200 return dt;
1201 }
1202
1203 /**
1204 * unflatten_device_tree - create tree of device_nodes from flat blob
1205 *
1206 * unflattens the device-tree passed by the firmware, creating the
1207 * tree of struct device_node. It also fills the "name" and "type"
1208 * pointers of the nodes so the normal device-tree walking functions
1209 * can be used.
1210 */
1211 void __init unflatten_device_tree(void)
1212 {
1213 void *fdt = initial_boot_params;
1214
1215 /* Don't use the bootloader provided DTB if ACPI is enabled */
1216 if (!acpi_disabled)
1217 fdt = NULL;
1218
1219 /*
1220 * Populate an empty root node when ACPI is enabled or bootloader
1221 * doesn't provide one.
1222 */
1223 if (!fdt) {
1224 fdt = (void *) __dtb_empty_root_begin;
1225 /* fdt_totalsize() will be used for copy size */
1226 if (fdt_totalsize(fdt) >
1227 __dtb_empty_root_end - __dtb_empty_root_begin) {
1228 pr_err("invalid size in dtb_empty_root\n");
1229 return;
1230 }
1231 of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
1232 fdt = copy_device_tree(fdt);
1233 }
1234
1235 __unflatten_device_tree(fdt, NULL, &of_root,
1236 early_init_dt_alloc_memory_arch, false);
1237
1238 /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
1239 of_alias_scan(early_init_dt_alloc_memory_arch);
1240
1241 unittest_unflatten_overlay_base();
1242 }
1243
1244 /**
1245 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
1246 *
1247 * Copies and unflattens the device-tree passed by the firmware, creating the
1248 * tree of struct device_node. It also fills the "name" and "type"
1249 * pointers of the nodes so the normal device-tree walking functions
1250 * can be used. This should only be used when the FDT memory has not been
1251 * reserved, such as when the FDT is built into the kernel init
1252 * section. If the FDT memory is already reserved then unflatten_device_tree
1253 * should be used instead.
1254 */
1255 void __init unflatten_and_copy_device_tree(void)
1256 {
1257 if (initial_boot_params)
1258 initial_boot_params = copy_device_tree(initial_boot_params);
1259
1260 unflatten_device_tree();
1261 }
1262
1263 #ifdef CONFIG_SYSFS
1264 static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
1265 struct bin_attribute *bin_attr,
1266 char *buf, loff_t off, size_t count)
1267 {
1268 memcpy(buf, initial_boot_params + off, count);
1269 return count;
1270 }
1271
1272 static int __init of_fdt_raw_init(void)
1273 {
1274 static struct bin_attribute of_fdt_raw_attr =
1275 __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
1276
1277 if (!initial_boot_params)
1278 return 0;
1279
1280 if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
1281 fdt_totalsize(initial_boot_params))) {
1282 pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
1283 return 0;
1284 }
1285 of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
1286 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
1287 }
1288 late_initcall(of_fdt_raw_init);
1289 #endif
1290
1291 #endif /* CONFIG_OF_EARLY_FLATTREE */
1292