/* Copyright 2013-2018 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <skiboot.h>
#include <mem-map.h>
#include <libfdt_env.h>
#include <lock.h>
#include <device.h>
#include <cpu.h>
#include <chip.h>
#include <affinity.h>
#include <types.h>
#include <mem_region.h>
#include <mem_region-malloc.h>

/* Memory poisoning on free (if POISON_MEM_REGION set to 1) */
#ifdef DEBUG
#define POISON_MEM_REGION 1
#else
#define POISON_MEM_REGION 0
#endif
#define POISON_MEM_REGION_WITH 0x99
#define POISON_MEM_REGION_LIMIT (1 * 1024 * 1024 * 1024)

/* Locking: The mem_region_lock protects the regions list from concurrent
 * updates. Additions to, or removals from, the region list must be done
 * with this lock held. This is typically done when we're establishing
 * the memory & reserved regions.
 *
 * Each region has a lock (region->free_list_lock) to protect the free list
 * from concurrent modification. This lock is used when we're allocating
 * memory out of a specific region.
 *
 * If both locks are needed (eg, __local_alloc, where we need to find a region,
 * then allocate from it), the mem_region_lock must be acquired before (and
 * released after) the per-region lock.
 */
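
/* Illustrative ordering under the rule above (a sketch, not new API;
 * this is the pattern __local_alloc follows):
 *
 *	lock(&mem_region_lock);
 *	...find a region on the list...
 *	lock(&region->free_list_lock);
 *	...allocate from its free list...
 *	unlock(&region->free_list_lock);
 *	unlock(&mem_region_lock);
 */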
struct lock mem_region_lock = LOCK_UNLOCKED;

static struct list_head regions = LIST_HEAD_INIT(regions);
static struct list_head early_reserves = LIST_HEAD_INIT(early_reserves);

static bool mem_region_init_done = false;
static bool mem_regions_finalised = false;

unsigned long top_of_ram = SKIBOOT_BASE + SKIBOOT_SIZE;

static struct mem_region skiboot_os_reserve = {
	.name	= "ibm,os-reserve",
	.start	= 0,
	.len	= SKIBOOT_BASE,
	.type	= REGION_OS,
};

struct mem_region skiboot_heap = {
	.name	= "ibm,firmware-heap",
	.start	= HEAP_BASE,
	.len	= HEAP_SIZE,
	.type	= REGION_SKIBOOT_HEAP,
};

static struct mem_region skiboot_code_and_text = {
	.name	= "ibm,firmware-code",
	.start	= SKIBOOT_BASE,
	.len	= HEAP_BASE - SKIBOOT_BASE,
	.type	= REGION_SKIBOOT_FIRMWARE,
};

static struct mem_region skiboot_after_heap = {
	.name	= "ibm,firmware-data",
	.start	= HEAP_BASE + HEAP_SIZE,
	.len	= SKIBOOT_BASE + SKIBOOT_SIZE - (HEAP_BASE + HEAP_SIZE),
	.type	= REGION_SKIBOOT_FIRMWARE,
};

static struct mem_region skiboot_cpu_stacks = {
	.name	= "ibm,firmware-stacks",
	.start	= CPU_STACKS_BASE,
	.len	= 0, /* TBA */
	.type	= REGION_SKIBOOT_FIRMWARE,
};

static struct mem_region skiboot_mambo_kernel = {
	.name	= "ibm,firmware-mambo-kernel",
	.start	= (unsigned long)KERNEL_LOAD_BASE,
	.len	= KERNEL_LOAD_SIZE,
	.type	= REGION_SKIBOOT_FIRMWARE,
};

static struct mem_region skiboot_mambo_initramfs = {
	.name	= "ibm,firmware-mambo-initramfs",
	.start	= (unsigned long)INITRAMFS_LOAD_BASE,
	.len	= INITRAMFS_LOAD_SIZE,
	.type	= REGION_SKIBOOT_FIRMWARE,
};

struct alloc_hdr {
	bool free : 1;
	bool prev_free : 1;
	bool printed : 1;
	unsigned long num_longs : BITS_PER_LONG-3; /* Including header. */
	const char *location;
};

struct free_hdr {
	struct alloc_hdr hdr;
	struct list_node list;
	/* ... unsigned long tailer; */
};

#define ALLOC_HDR_LONGS (sizeof(struct alloc_hdr) / sizeof(long))
#define ALLOC_MIN_LONGS (sizeof(struct free_hdr) / sizeof(long) + 1)
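
/* Block layout, as implied by the structures above (units of longs):
 *
 *	allocated:  [alloc_hdr][payload ..............................]
 *	free:       [alloc_hdr][list_node][....................][tailer]
 *
 * num_longs counts the whole block, header included. The last long of
 * a free block is a tailer repeating num_longs, so make_free() can
 * walk backwards to the previous block when coalescing.
 */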

/* Avoid ugly casts. */
static void *region_start(const struct mem_region *region)
{
	return (void *)(unsigned long)region->start;
}

/* Each free block has a tailer, so we can walk backwards. */
static unsigned long *tailer(struct free_hdr *f)
{
	return (unsigned long *)f + f->hdr.num_longs - 1;
}

/* This walks forward to the next hdr (or NULL if at the end). */
static struct alloc_hdr *next_hdr(const struct mem_region *region,
				  const struct alloc_hdr *hdr)
{
	void *next;

	next = ((unsigned long *)hdr + hdr->num_longs);
	if (next >= region_start(region) + region->len)
		next = NULL;
	return next;
}

#if POISON_MEM_REGION == 1
static void mem_poison(struct free_hdr *f)
{
	size_t poison_size = (void *)tailer(f) - (void *)(f + 1);

	/* We only poison up to a limit, as otherwise boot is
	 * kinda slow */
	if (poison_size > POISON_MEM_REGION_LIMIT)
		poison_size = POISON_MEM_REGION_LIMIT;

	memset(f + 1, POISON_MEM_REGION_WITH, poison_size);
}
#endif

/* Creates free block covering entire region. */
static void init_allocatable_region(struct mem_region *region)
{
	struct free_hdr *f = region_start(region);
	assert(region->type == REGION_SKIBOOT_HEAP ||
	       region->type == REGION_MEMORY);
	f->hdr.num_longs = region->len / sizeof(long);
	f->hdr.free = true;
	f->hdr.prev_free = false;
	*tailer(f) = f->hdr.num_longs;
	list_head_init(&region->free_list);
	list_add(&region->free_list, &f->list);
#if POISON_MEM_REGION == 1
	mem_poison(f);
#endif
}

static void make_free(struct mem_region *region, struct free_hdr *f,
		      const char *location, bool skip_poison)
{
	struct alloc_hdr *next;

#if POISON_MEM_REGION == 1
	if (!skip_poison)
		mem_poison(f);
#else
	(void)skip_poison;
#endif

	if (f->hdr.prev_free) {
		struct free_hdr *prev;
		unsigned long *prev_tailer = (unsigned long *)f - 1;

		assert(*prev_tailer);
		prev = (void *)((unsigned long *)f - *prev_tailer);
		assert(prev->hdr.free);
		assert(!prev->hdr.prev_free);

		/* Expand to cover the one we just freed. */
		prev->hdr.num_longs += f->hdr.num_longs;
		f = prev;
	} else {
		f->hdr.free = true;
		f->hdr.location = location;
		list_add(&region->free_list, &f->list);
	}

	/* Fix up tailer. */
	*tailer(f) = f->hdr.num_longs;

	/* If next is free, coalesce it */
	next = next_hdr(region, &f->hdr);
	if (next) {
		next->prev_free = true;
		if (next->free) {
			struct free_hdr *next_free = (void *)next;
			list_del_from(&region->free_list, &next_free->list);
			/* Maximum of one level of recursion */
			make_free(region, next_free, location, true);
		}
	}
}

/* Can we fit this many longs with this alignment in this free block? */
static bool fits(struct free_hdr *f, size_t longs, size_t align, size_t *offset)
{
	*offset = 0;

	while (f->hdr.num_longs >= *offset + longs) {
		size_t addr;

		addr = (unsigned long)f
			+ (*offset + ALLOC_HDR_LONGS) * sizeof(long);
		if ((addr & (align - 1)) == 0)
			return true;

		/* Don't make tiny chunks! */
		if (*offset == 0)
			*offset = ALLOC_MIN_LONGS;
		else
			(*offset)++;
	}
	return false;
}
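
/* Worked example of the search above (on 64-bit, ALLOC_HDR_LONGS is
 * likely 2, i.e. a 16-byte header): asking for 16-byte alignment from
 * a block at a 16-byte boundary succeeds at offset 0, the payload
 * starting right after the header. If the block is only 8-byte
 * aligned, fits() skips forward, jumping straight to ALLOC_MIN_LONGS
 * first so the gap left in front is always big enough to stand alone
 * as a free block.
 */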

static void discard_excess(struct mem_region *region,
			   struct alloc_hdr *hdr, size_t alloc_longs,
			   const char *location, bool skip_poison)
{
	/* Do we have excess? */
	if (hdr->num_longs > alloc_longs + ALLOC_MIN_LONGS) {
		struct free_hdr *post;

		/* Set up post block. */
		post = (void *)hdr + alloc_longs * sizeof(long);
		post->hdr.num_longs = hdr->num_longs - alloc_longs;
		post->hdr.prev_free = false;

		/* Trim our block. */
		hdr->num_longs = alloc_longs;

		/* This coalesces as required. */
		make_free(region, post, location, skip_poison);
	}
}

static const char *hdr_location(const struct alloc_hdr *hdr)
{
	/* Corrupt: step carefully! */
	if (is_rodata(hdr->location))
		return hdr->location;
	return "*CORRUPT*";
}

static void bad_header(const struct mem_region *region,
		       const struct alloc_hdr *hdr,
		       const char *during,
		       const char *location)
{
	/* Corrupt: step carefully! */
	if (is_rodata(hdr->location))
		prerror("%p (in %s) %s at %s, previously %s\n",
			hdr - 1, region->name, during, location,
			hdr->location);
	else
		prerror("%p (in %s) %s at %s, previously %p\n",
			hdr - 1, region->name, during, location,
			hdr->location);
	abort();
}

static bool region_is_reservable(struct mem_region *region)
{
	return region->type != REGION_OS;
}

static bool region_is_reserved(struct mem_region *region)
{
	return region->type != REGION_OS && region->type != REGION_MEMORY;
}

void mem_dump_allocs(void)
{
	struct mem_region *region;
	struct alloc_hdr *h, *i;

	prlog(PR_INFO, "Memory regions:\n");
	list_for_each(&regions, region, list) {
		if (!(region->type == REGION_SKIBOOT_HEAP ||
		      region->type == REGION_MEMORY))
			continue;
		prlog(PR_INFO, "  0x%012llx..%012llx : %s\n",
		      (long long)region->start,
		      (long long)(region->start + region->len - 1),
		      region->name);
		if (region->free_list.n.next == NULL) {
			prlog(PR_INFO, "    no allocs\n");
			continue;
		}

		/*
		 * XXX: When dumping the allocation list we coalesce
		 * allocations with the same location and size into a single
		 * line. This is quadratic, but it makes the dump
		 * human-readable and the raw dump sometimes causes the log
		 * buffer to wrap.
		 */
		for (h = region_start(region); h; h = next_hdr(region, h))
			h->printed = false;

		for (h = region_start(region); h; h = next_hdr(region, h)) {
			unsigned long bytes;
			int count = 0;

			if (h->free)
				continue;
			if (h->printed)
				continue;

			for (i = h; i; i = next_hdr(region, i)) {
				if (i->free)
					continue;
				if (i->num_longs != h->num_longs)
					continue;
				if (strcmp(i->location, h->location))
					continue;

				i->printed = true;
				count++;
			}

			bytes = h->num_longs * sizeof(long);
			prlog(PR_NOTICE, " % 8d allocs of 0x%.8lx bytes at %s (total 0x%lx)\n",
			      count, bytes, hdr_location(h), bytes * count);
		}
	}
}

int64_t mem_dump_free(void)
{
	struct mem_region *region;
	struct alloc_hdr *hdr;
	int64_t total_free;
	int64_t region_free;

	total_free = 0;

	prlog(PR_INFO, "Free space in HEAP memory regions:\n");
	list_for_each(&regions, region, list) {
		if (!(region->type == REGION_SKIBOOT_HEAP ||
		      region->type == REGION_MEMORY))
			continue;
		region_free = 0;

		if (region->free_list.n.next == NULL)
			continue;

		for (hdr = region_start(region); hdr; hdr = next_hdr(region, hdr)) {
			if (!hdr->free)
				continue;

			region_free += hdr->num_longs * sizeof(long);
		}
		prlog(PR_INFO, "Region %s free: %"PRIx64"\n",
		      region->name, region_free);
		total_free += region_free;
	}

	prlog(PR_INFO, "Total free: %"PRIu64"\n", total_free);

	return total_free;
}

static void *__mem_alloc(struct mem_region *region, size_t size, size_t align,
			 const char *location)
{
	size_t alloc_longs, offset;
	struct free_hdr *f;
	struct alloc_hdr *next;

	/* Align must be power of 2. */
	assert(!((align - 1) & align));

	/* This should be a constant. */
	assert(is_rodata(location));

	/* Unallocatable region? */
	if (!(region->type == REGION_SKIBOOT_HEAP ||
	      region->type == REGION_MEMORY))
		return NULL;

	/* First allocation? */
	if (region->free_list.n.next == NULL)
		init_allocatable_region(region);

	/* Don't do screwy sizes. */
	if (size > region->len)
		return NULL;

	/* Don't do tiny alignments, we deal in long increments. */
	if (align < sizeof(long))
		align = sizeof(long);

	/* Convert size to number of longs, too. */
	alloc_longs = (size + sizeof(long) - 1) / sizeof(long) + ALLOC_HDR_LONGS;

	/* Can't be too small for when we free it, either. */
	if (alloc_longs < ALLOC_MIN_LONGS)
		alloc_longs = ALLOC_MIN_LONGS;

	/* Walk free list. */
	list_for_each(&region->free_list, f, list) {
		/* We may have to skip some to meet alignment. */
		if (fits(f, alloc_longs, align, &offset))
			goto found;
	}

	return NULL;

found:
	assert(f->hdr.free);
	assert(!f->hdr.prev_free);

	/* This block is no longer free. */
	list_del_from(&region->free_list, &f->list);
	f->hdr.free = false;
	f->hdr.location = location;

	next = next_hdr(region, &f->hdr);
	if (next) {
		assert(next->prev_free);
		next->prev_free = false;
	}

	if (offset != 0) {
		struct free_hdr *pre = f;

		f = (void *)f + offset * sizeof(long);
		assert(f >= pre + 1);

		/* Set up new header. */
		f->hdr.num_longs = pre->hdr.num_longs - offset;
		/* f->hdr.prev_free will be set by make_free below. */
		f->hdr.free = false;
		f->hdr.location = location;

		/* Fix up old header. */
		pre->hdr.num_longs = offset;
		pre->hdr.prev_free = false;

		/* This coalesces as required. */
		make_free(region, pre, location, true);
	}

	/* We might be too long; put the rest back. */
	discard_excess(region, &f->hdr, alloc_longs, location, true);

	/* Clear tailer for debugging */
	*tailer(f) = 0;

	/* Their pointer is immediately after header. */
	return &f->hdr + 1;
}

void *mem_alloc(struct mem_region *region, size_t size, size_t align,
		const char *location)
{
	static bool dumped = false;
	void *r;

	assert(lock_held_by_me(&region->free_list_lock));

	r = __mem_alloc(region, size, align, location);
	if (r)
		return r;

	prerror("mem_alloc(0x%lx, 0x%lx, \"%s\", %s) failed !\n",
		size, align, location, region->name);
	if (!dumped) {
		mem_dump_allocs();
		dumped = true;
	}

	return NULL;
}
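
/* A minimal caller sketch. Callers normally reach this through the
 * malloc()/zalloc() wrappers in mem_region-malloc.h rather than
 * directly; __location__ is skiboot's file:line string macro:
 *
 *	lock(&skiboot_heap.free_list_lock);
 *	p = mem_alloc(&skiboot_heap, size, sizeof(long), __location__);
 *	unlock(&skiboot_heap.free_list_lock);
 */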

void mem_free(struct mem_region *region, void *mem, const char *location)
{
	struct alloc_hdr *hdr;

	/* This should be a constant. */
	assert(is_rodata(location));

	assert(lock_held_by_me(&region->free_list_lock));

	/* Freeing NULL is always a noop. */
	if (!mem)
		return;

	/* Your memory is in the region, right? */
	assert(mem >= region_start(region) + sizeof(*hdr));
	assert(mem < region_start(region) + region->len);

	/* Grab header. */
	hdr = mem - sizeof(*hdr);

	if (hdr->free)
		bad_header(region, hdr, "re-freed", location);

	make_free(region, (struct free_hdr *)hdr, location, false);
}

size_t mem_allocated_size(const void *ptr)
{
	const struct alloc_hdr *hdr = ptr - sizeof(*hdr);
	return hdr->num_longs * sizeof(long) - sizeof(struct alloc_hdr);
}

bool mem_resize(struct mem_region *region, void *mem, size_t len,
		const char *location)
{
	struct alloc_hdr *hdr, *next;
	struct free_hdr *f;

	/* This should be a constant. */
	assert(is_rodata(location));

	assert(lock_held_by_me(&region->free_list_lock));

	/* Get header. */
	hdr = mem - sizeof(*hdr);
	if (hdr->free)
		bad_header(region, hdr, "resize", location);

	/* Round up to a number of longs, including the header. */
	len = (sizeof(*hdr) + len + sizeof(long) - 1) / sizeof(long);

	/* Can't be too small for when we free it, either. */
	if (len < ALLOC_MIN_LONGS)
		len = ALLOC_MIN_LONGS;

	/* Shrinking is simple. */
	if (len <= hdr->num_longs) {
		hdr->location = location;
		discard_excess(region, hdr, len, location, false);
		return true;
	}

	/* Check if we can expand. */
	next = next_hdr(region, hdr);
	if (!next || !next->free || hdr->num_longs + next->num_longs < len)
		return false;

	/* OK, it's free and big enough, absorb it. */
	f = (struct free_hdr *)next;
	list_del_from(&region->free_list, &f->list);
	hdr->num_longs += next->num_longs;
	hdr->location = location;

	/* Update next prev_free */
	next = next_hdr(region, &f->hdr);
	if (next) {
		assert(next->prev_free);
		next->prev_free = false;
	}

	/* Clear tailer for debugging */
	*tailer(f) = 0;

	/* Now we might have *too* much. */
	discard_excess(region, hdr, len, location, true);
	return true;
}

bool mem_check(const struct mem_region *region)
{
	size_t frees = 0;
	struct alloc_hdr *hdr, *prev_free = NULL;
	struct free_hdr *f;

	/* Check it's sanely aligned. */
	if (region->start % sizeof(long)) {
		prerror("Region '%s' not sanely aligned (%llx)\n",
			region->name, (unsigned long long)region->start);
		return false;
	}
	if ((long)region->len % sizeof(long)) {
		prerror("Region '%s' not sane length (%llu)\n",
			region->name, (unsigned long long)region->len);
		return false;
	}

	/* Not ours to play with, or empty? Don't do anything. */
	if (!(region->type == REGION_MEMORY ||
	      region->type == REGION_SKIBOOT_HEAP) ||
	    region->free_list.n.next == NULL)
		return true;

	/* Walk linearly. */
	for (hdr = region_start(region); hdr; hdr = next_hdr(region, hdr)) {
		if (hdr->num_longs < ALLOC_MIN_LONGS) {
			prerror("Region '%s' %s %p (%s) size %zu\n",
				region->name, hdr->free ? "free" : "alloc",
				hdr, hdr_location(hdr),
				hdr->num_longs * sizeof(long));
			return false;
		}
		if ((unsigned long)hdr + hdr->num_longs * sizeof(long) >
		    region->start + region->len) {
			prerror("Region '%s' %s %p (%s) oversize %zu\n",
				region->name, hdr->free ? "free" : "alloc",
				hdr, hdr_location(hdr),
				hdr->num_longs * sizeof(long));
			return false;
		}
		if (hdr->free) {
			if (hdr->prev_free || prev_free) {
				prerror("Region '%s' free %p (%s) has prev_free"
					" %p (%s) %sset?\n",
					region->name, hdr, hdr_location(hdr),
					prev_free,
					prev_free ? hdr_location(prev_free)
					: "NULL",
					hdr->prev_free ? "" : "un");
				return false;
			}
			prev_free = hdr;
			/* XOR the offset of each free block found by the
			 * walk; the same XOR over the free list below must
			 * cancel it back to zero. */
			frees ^= (unsigned long)hdr - region->start;
		} else {
			if (hdr->prev_free != (bool)prev_free) {
				prerror("Region '%s' alloc %p (%s) has"
					" prev_free %p %sset?\n",
					region->name, hdr, hdr_location(hdr),
					prev_free, hdr->prev_free ? "" : "un");
				return false;
			}
			prev_free = NULL;
		}
	}

	/* Now walk free list. */
	list_for_each(&region->free_list, f, list)
		frees ^= (unsigned long)f - region->start;

	if (frees) {
		prerror("Region '%s' free list and walk do not match!\n",
			region->name);
		return false;
	}
	return true;
}

bool mem_check_all(void)
{
	struct mem_region *r;

	list_for_each(&regions, r, list) {
		if (!mem_check(r))
			return false;
	}

	return true;
}

static struct mem_region *new_region(const char *name,
				     uint64_t start, uint64_t len,
				     struct dt_node *node,
				     enum mem_region_type type)
{
	struct mem_region *region;

	region = malloc(sizeof(*region));
	if (!region)
		return NULL;

	region->name = name;
	region->start = start;
	region->len = len;
	region->node = node;
	region->type = type;
	region->free_list.n.next = NULL;
	init_lock(&region->free_list_lock);

	return region;
}

/* We always split regions, so we only have to replace one. */
static struct mem_region *split_region(struct mem_region *head,
				       uint64_t split_at,
				       enum mem_region_type type)
{
	struct mem_region *tail;
	uint64_t end = head->start + head->len;

	tail = new_region(head->name, split_at, end - split_at,
			  head->node, type);
	/* Original region becomes head. */
	if (tail)
		head->len -= tail->len;

	return tail;
}
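
/* For example, splitting a region covering [0x1000, 0x5000) at 0x3000
 * shrinks the original to [0x1000, 0x3000) and returns a new tail
 * covering [0x3000, 0x5000) with the requested type (addresses purely
 * illustrative).
 */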

static bool intersects(const struct mem_region *region, uint64_t addr)
{
	return addr > region->start &&
		addr < region->start + region->len;
}

static bool maybe_split(struct mem_region *r, uint64_t split_at)
{
	struct mem_region *tail;

	if (!intersects(r, split_at))
		return true;

	tail = split_region(r, split_at, r->type);
	if (!tail)
		return false;

	/* Tail add is important: we may need to split again! */
	list_add_tail(&regions, &tail->list);
	return true;
}

static bool overlaps(const struct mem_region *r1, const struct mem_region *r2)
{
	return (r1->start + r1->len > r2->start
		&& r1->start < r2->start + r2->len);
}

static bool contains(const struct mem_region *r1, const struct mem_region *r2)
{
	u64 r1_end = r1->start + r1->len;
	u64 r2_end = r2->start + r2->len;

	return (r1->start <= r2->start && r2_end <= r1_end);
}

static struct mem_region *get_overlap(const struct mem_region *region)
{
	struct mem_region *i;

	list_for_each(&regions, i, list) {
		if (overlaps(region, i))
			return i;
	}
	return NULL;
}

static bool add_region(struct mem_region *region)
{
	struct mem_region *r;

	if (mem_regions_finalised) {
		prerror("MEM: add_region(%s@0x%"PRIx64") called after finalise!\n",
			region->name, region->start);
		return false;
	}

	/* First split any regions which intersect. */
	list_for_each(&regions, r, list) {
		/*
		 * The new region should be fully contained by an existing one.
		 * If it's not then we have a problem where reservations
		 * partially overlap which is probably broken.
		 *
		 * NB: There *might* be situations where this is legitimate,
		 * but the region handling does not currently support this.
		 */
		if (overlaps(r, region) && !contains(r, region)) {
			prerror("MEM: Partial overlap detected between regions:\n");
			prerror("MEM: %s [0x%"PRIx64"-0x%"PRIx64"] (new)\n",
				region->name, region->start,
				region->start + region->len);
			prerror("MEM: %s [0x%"PRIx64"-0x%"PRIx64"]\n",
				r->name, r->start, r->start + r->len);
			return false;
		}

		if (!maybe_split(r, region->start) ||
		    !maybe_split(r, region->start + region->len))
			return false;
	}

	/* Now we have only whole overlaps, if any. */
	while ((r = get_overlap(region)) != NULL) {
		assert(r->start == region->start);
		assert(r->len == region->len);
		list_del_from(&regions, &r->list);
		free(r);
	}

	/* Finally, add in our own region. */
	list_add(&regions, &region->list);
	return true;
}

static void mem_reserve(enum mem_region_type type, const char *name,
			uint64_t start, uint64_t len)
{
	struct mem_region *region;
	bool added = true;

	lock(&mem_region_lock);
	region = new_region(name, start, len, NULL, type);
	assert(region);

	if (!mem_region_init_done)
		list_add(&early_reserves, &region->list);
	else
		added = add_region(region);

	assert(added);
	unlock(&mem_region_lock);
}

void mem_reserve_fw(const char *name, uint64_t start, uint64_t len)
{
	mem_reserve(REGION_FW_RESERVED, name, start, len);
}

void mem_reserve_hwbuf(const char *name, uint64_t start, uint64_t len)
{
	mem_reserve(REGION_RESERVED, name, start, len);
}

static bool matches_chip_id(const __be32 ids[], size_t num, u32 chip_id)
{
	size_t i;

	for (i = 0; i < num; i++)
		if (be32_to_cpu(ids[i]) == chip_id)
			return true;

	return false;
}

void *__local_alloc(unsigned int chip_id, size_t size, size_t align,
		    const char *location)
{
	struct mem_region *region;
	void *p = NULL;
	bool use_local = true;

	lock(&mem_region_lock);

restart:
	list_for_each(&regions, region, list) {
		const struct dt_property *prop;
		const __be32 *ids;

		if (!(region->type == REGION_SKIBOOT_HEAP ||
		      region->type == REGION_MEMORY))
			continue;

		/* Don't allocate from normal heap. */
		if (region == &skiboot_heap)
			continue;

		/* First pass, only match node local regions */
		if (use_local) {
			if (!region->node)
				continue;
			prop = dt_find_property(region->node, "ibm,chip-id");
			ids = (const __be32 *)prop->prop;
			if (!matches_chip_id(ids, prop->len/sizeof(u32),
					     chip_id))
				continue;
		}

		/* Second pass, match anything */
		lock(&region->free_list_lock);
		p = mem_alloc(region, size, align, location);
		unlock(&region->free_list_lock);
		if (p)
			break;
	}

	/*
	 * If we can't allocate the memory block from the expected
	 * node, we bail to any one that can accommodate our request.
	 */
	if (!p && use_local) {
		use_local = false;
		goto restart;
	}

	unlock(&mem_region_lock);

	return p;
}

struct mem_region *find_mem_region(const char *name)
{
	struct mem_region *region;

	list_for_each(&regions, region, list) {
		if (streq(region->name, name))
			return region;
	}
	return NULL;
}

bool mem_range_is_reserved(uint64_t start, uint64_t size)
{
	uint64_t end = start + size;
	struct mem_region *region;
	struct list_head *search;

	/* We may have the range covered by a number of regions, which could
	 * appear in any order. So, we look for a region that covers the
	 * start address, and bump start up to the end of that region.
	 *
	 * We repeat until we've either bumped past the end of the range,
	 * or we didn't find a matching region.
	 *
	 * This has a worst-case of O(n^2), but n is well bounded by the
	 * small number of reservations.
	 */

	if (!mem_region_init_done)
		search = &early_reserves;
	else
		search = &regions;

	for (;;) {
		bool found = false;

		list_for_each(search, region, list) {
			if (!region_is_reserved(region))
				continue;

			/* does this region overlap the start address, and
			 * have a non-zero size? */
			if (region->start <= start &&
			    region->start + region->len > start &&
			    region->len) {
				start = region->start + region->len;
				found = true;
			}
		}

		/* 'end' is the first byte outside of the range */
		if (start >= end)
			return true;

		if (!found)
			break;
	}

	return false;
}
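
/* Example of the bumping loop above: with reservations [0x0, 0x1000)
 * and [0x1000, 0x4000), a query for start 0x0, size 0x2000 first
 * bumps start to 0x1000, then to 0x4000; that is past end, so the
 * whole range is reserved (addresses purely illustrative).
 */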

static void mem_region_parse_reserved_properties(void)
{
	const struct dt_property *names, *ranges;
	struct mem_region *region;

	prlog(PR_DEBUG, "MEM: parsing reserved memory from "
	      "reserved-names/-ranges properties\n");

	names = dt_find_property(dt_root, "reserved-names");
	ranges = dt_find_property(dt_root, "reserved-ranges");
	if (names && ranges) {
		const uint64_t *range;
		int n, len;

		range = (const void *)ranges->prop;

		for (n = 0; n < names->len; n += len, range += 2) {
			char *name;

			len = strlen(names->prop + n) + 1;
			name = strdup(names->prop + n);

			region = new_region(name,
					    dt_get_number(range, 2),
					    dt_get_number(range + 1, 2),
					    NULL, REGION_FW_RESERVED);
			if (!add_region(region)) {
				prerror("Couldn't add mem_region %s\n", name);
				abort();
			}
		}
	} else if (names || ranges) {
		prerror("Invalid properties: reserved-names=%p "
			"with reserved-ranges=%p\n",
			names, ranges);
		abort();
	}
}

static bool mem_region_parse_reserved_nodes(const char *path)
{
	struct dt_node *parent, *node;

	parent = dt_find_by_path(dt_root, path);
	if (!parent)
		return false;

	prlog(PR_INFO, "MEM: parsing reserved memory from node %s\n", path);

	dt_for_each_child(parent, node) {
		const struct dt_property *reg;
		struct mem_region *region;
		int type;

		reg = dt_find_property(node, "reg");
		if (!reg) {
			char *nodepath = dt_get_path(node);
			prerror("node %s has no reg property, ignoring\n",
				nodepath);
			free(nodepath);
			continue;
		}

		if (dt_has_node_property(node, "no-map", NULL))
			type = REGION_RESERVED;
		else
			type = REGION_FW_RESERVED;

		region = new_region(strdup(node->name),
				    dt_get_number(reg->prop, 2),
				    dt_get_number(reg->prop + sizeof(u64), 2),
				    node, type);
		if (!add_region(region)) {
			char *nodepath = dt_get_path(node);
			prerror("node %s failed to add_region()\n", nodepath);
			free(nodepath);
		}
	}

	return true;
}

/* Trawl through device tree, create memory regions from nodes. */
void mem_region_init(void)
{
	struct mem_region *region, *next;
	struct dt_node *i;
	bool rc;

	/* Ensure we have no collision between skiboot core and our heap */
	extern char _end[];
	BUILD_ASSERT(HEAP_BASE >= (uint64_t)_end);

	/*
	 * Add associativity properties outside of the lock
	 * to avoid recursive locking caused by allocations
	 * done by add_chip_dev_associativity()
	 */
	dt_for_each_node(dt_root, i) {
		if (!dt_has_node_property(i, "device_type", "memory"))
			continue;

		/* Add associativity properties */
		add_chip_dev_associativity(i);
	}

	/* Add each memory node. */
	dt_for_each_node(dt_root, i) {
		uint64_t start, len;
		char *rname;
#define NODE_REGION_PREFIX "ibm,firmware-allocs-"

		if (!dt_has_node_property(i, "device_type", "memory"))
			continue;
		rname = zalloc(strlen(i->name) + strlen(NODE_REGION_PREFIX) + 1);
		assert(rname);
		strcat(rname, NODE_REGION_PREFIX);
		strcat(rname, i->name);
		start = dt_get_address(i, 0, &len);
		lock(&mem_region_lock);
		region = new_region(rname, start, len, i, REGION_MEMORY);
		if (!region) {
			prerror("MEM: Could not add mem region %s!\n", i->name);
			abort();
		}
		list_add(&regions, &region->list);
		if ((start + len) > top_of_ram)
			top_of_ram = start + len;
		unlock(&mem_region_lock);
	}

	/*
	 * This is called after we know the maximum PIR of all CPUs,
	 * so we can dynamically set the stack length.
	 */
	skiboot_cpu_stacks.len = (cpu_max_pir + 1) * STACK_SIZE;

	lock(&mem_region_lock);

	/* Now carve out our own reserved areas. */
	if (!add_region(&skiboot_os_reserve) ||
	    !add_region(&skiboot_code_and_text) ||
	    !add_region(&skiboot_heap) ||
	    !add_region(&skiboot_after_heap) ||
	    !add_region(&skiboot_cpu_stacks)) {
		prerror("Out of memory adding skiboot reserved areas\n");
		abort();
	}

	if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) {
		if (!add_region(&skiboot_mambo_kernel) ||
		    !add_region(&skiboot_mambo_initramfs)) {
			prerror("Out of memory adding mambo payload\n");
			abort();
		}
	}

	/* Add reserved ranges from HDAT */
	list_for_each_safe(&early_reserves, region, next, list) {
		bool added;

		list_del(&region->list);
		added = add_region(region);
		assert(added);
	}

	/* Add reserved ranges from the DT */
	rc = mem_region_parse_reserved_nodes("/reserved-memory");
	if (!rc)
		rc = mem_region_parse_reserved_nodes(
				"/ibm,hostboot/reserved-memory");
	if (!rc)
		mem_region_parse_reserved_properties();

	mem_region_init_done = true;
	unlock(&mem_region_lock);
}

static uint64_t allocated_length(const struct mem_region *r)
{
	struct free_hdr *f, *last = NULL;

	/* No allocations at all? */
	if (r->free_list.n.next == NULL)
		return 0;

	/* Find last free block. */
	list_for_each(&r->free_list, f, list)
		if (f > last)
			last = f;

	/* No free blocks? */
	if (!last)
		return r->len;

	/* Last free block isn't at end? */
	if (next_hdr(r, &last->hdr))
		return r->len;
	return (unsigned long)last - r->start;
}

/* Separate out allocated sections into their own region. */
void mem_region_release_unused(void)
{
	struct mem_region *r;

	lock(&mem_region_lock);
	assert(!mem_regions_finalised);

	prlog(PR_INFO, "Releasing unused memory:\n");
	list_for_each(&regions, r, list) {
		uint64_t used_len;

		/* If it's not allocatable, ignore it. */
		if (!(r->type == REGION_SKIBOOT_HEAP ||
		      r->type == REGION_MEMORY))
			continue;

		used_len = allocated_length(r);

		prlog(PR_INFO, "    %s: %llu/%llu used\n",
		      r->name, (long long)used_len, (long long)r->len);

		/* We keep the skiboot heap. */
		if (r == &skiboot_heap)
			continue;

		/* Nothing used? Whole thing is for Linux. */
		if (used_len == 0)
			r->type = REGION_OS;
		/* Partially used? Split region. */
		else if (used_len != r->len) {
			struct mem_region *for_linux;
			struct free_hdr *last = region_start(r) + used_len;

			/* Remove the final free block. */
			list_del_from(&r->free_list, &last->list);

			for_linux = split_region(r, r->start + used_len,
						 REGION_OS);
			if (!for_linux) {
				prerror("OOM splitting mem node %s for linux\n",
					r->name);
				abort();
			}
			list_add(&regions, &for_linux->list);
		}
	}
	unlock(&mem_region_lock);
}

static void mem_clear_range(uint64_t s, uint64_t e)
{
	uint64_t res_start, res_end;

	/* Skip exception vectors */
	if (s < EXCEPTION_VECTORS_END)
		s = EXCEPTION_VECTORS_END;

	/* Skip kernel preload area */
	res_start = (uint64_t)KERNEL_LOAD_BASE;
	res_end = res_start + KERNEL_LOAD_SIZE;

	if (s >= res_start && s < res_end)
		s = res_end;
	if (e > res_start && e <= res_end)
		e = res_start;
	if (e <= s)
		return;
	if (s < res_start && e > res_end) {
		mem_clear_range(s, res_start);
		mem_clear_range(res_end, e);
		return;
	}

	/* Skip initramfs preload area */
	res_start = (uint64_t)INITRAMFS_LOAD_BASE;
	res_end = res_start + INITRAMFS_LOAD_SIZE;

	if (s >= res_start && s < res_end)
		s = res_end;
	if (e > res_start && e <= res_end)
		e = res_start;
	if (e <= s)
		return;
	if (s < res_start && e > res_end) {
		mem_clear_range(s, res_start);
		mem_clear_range(res_end, e);
		return;
	}

	prlog(PR_DEBUG, "Clearing region %llx-%llx\n",
	      (long long)s, (long long)e);
	memset((void *)s, 0, e - s);
}

struct mem_region_clear_job_args {
	char *job_name;
	uint64_t s, e;
};

static void mem_region_clear_job(void *data)
{
	struct mem_region_clear_job_args *arg = (struct mem_region_clear_job_args *)data;
	mem_clear_range(arg->s, arg->e);
}

#define MEM_REGION_CLEAR_JOB_SIZE (16ULL*(1<<30))
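
/* Unused memory is cleared by a fleet of per-chunk CPU jobs rather
 * than one long memset; capping each job at 16GB keeps the jobs short
 * while the chip-local queueing in start_mem_region_clear_unused()
 * below spreads them across the machine.
 */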

static struct cpu_job **mem_clear_jobs;
static struct mem_region_clear_job_args *mem_clear_job_args;
static int mem_clear_njobs = 0;

void start_mem_region_clear_unused(void)
{
	struct mem_region *r;
	uint64_t s, l;
	uint64_t total = 0;
	uint32_t chip_id;
	char *path;
	int i;
	struct cpu_job **jobs;
	struct mem_region_clear_job_args *job_args;

	lock(&mem_region_lock);
	assert(mem_regions_finalised);

	mem_clear_njobs = 0;

	list_for_each(&regions, r, list) {
		if (!(r->type == REGION_OS))
			continue;
		mem_clear_njobs++;
		/* One job per 16GB */
		mem_clear_njobs += r->len / MEM_REGION_CLEAR_JOB_SIZE;
	}

	jobs = malloc(mem_clear_njobs * sizeof(struct cpu_job *));
	job_args = malloc(mem_clear_njobs * sizeof(struct mem_region_clear_job_args));
	mem_clear_jobs = jobs;
	mem_clear_job_args = job_args;

	prlog(PR_NOTICE, "Clearing unused memory:\n");
	i = 0;
	list_for_each(&regions, r, list) {
		/* If it's not unused, ignore it. */
		if (!(r->type == REGION_OS))
			continue;

		assert(r != &skiboot_heap);

		s = r->start;
		l = r->len;
		while (l > MEM_REGION_CLEAR_JOB_SIZE) {
			job_args[i].s = s + l - MEM_REGION_CLEAR_JOB_SIZE;
			job_args[i].e = s + l;
			l -= MEM_REGION_CLEAR_JOB_SIZE;
			job_args[i].job_name = malloc(sizeof(char) * 100);
			total += MEM_REGION_CLEAR_JOB_SIZE;
			chip_id = __dt_get_chip_id(r->node);
			if (chip_id == -1)
				chip_id = 0;
			path = dt_get_path(r->node);
			snprintf(job_args[i].job_name, 100,
				 "clear %s, %s 0x%"PRIx64" len: %"PRIx64" on %d",
				 r->name, path,
				 job_args[i].s,
				 (job_args[i].e - job_args[i].s),
				 chip_id);
			free(path);
			jobs[i] = cpu_queue_job_on_node(chip_id,
							job_args[i].job_name,
							mem_region_clear_job,
							&job_args[i]);
			if (!jobs[i])
				jobs[i] = cpu_queue_job(NULL,
							job_args[i].job_name,
							mem_region_clear_job,
							&job_args[i]);
			assert(jobs[i]);
			i++;
		}
		job_args[i].s = s;
		job_args[i].e = s + l;
		job_args[i].job_name = malloc(sizeof(char) * 100);
		total += l;
		chip_id = __dt_get_chip_id(r->node);
		if (chip_id == -1)
			chip_id = 0;
		path = dt_get_path(r->node);
		snprintf(job_args[i].job_name, 100,
			 "clear %s, %s 0x%"PRIx64" len: 0x%"PRIx64" on %d",
			 r->name, path,
			 job_args[i].s,
			 (job_args[i].e - job_args[i].s),
			 chip_id);
		free(path);
		jobs[i] = cpu_queue_job_on_node(chip_id,
						job_args[i].job_name,
						mem_region_clear_job,
						&job_args[i]);
		/* As in the loop above: fall back to any node if the
		 * chip-local queue isn't available. */
		if (!jobs[i])
			jobs[i] = cpu_queue_job(NULL,
						job_args[i].job_name,
						mem_region_clear_job,
						&job_args[i]);
		assert(jobs[i]);
		i++;
	}
	unlock(&mem_region_lock);
	cpu_process_local_jobs();
}

void wait_mem_region_clear_unused(void)
{
	uint64_t l;
	uint64_t total = 0;
	int i;

	for (i = 0; i < mem_clear_njobs; i++)
		total += (mem_clear_job_args[i].e - mem_clear_job_args[i].s);

	l = 0;
	for (i = 0; i < mem_clear_njobs; i++) {
		cpu_wait_job(mem_clear_jobs[i], true);
		l += (mem_clear_job_args[i].e - mem_clear_job_args[i].s);
		printf("Clearing memory... %"PRIu64"/%"PRIu64"GB done\n",
		       l >> 30, total >> 30);
		free(mem_clear_job_args[i].job_name);
	}
	free(mem_clear_jobs);
	free(mem_clear_job_args);
}

static void mem_region_add_dt_reserved_node(struct dt_node *parent,
					    struct mem_region *region)
{
	char *name, *p;

	/* If a reserved region was established before skiboot, it may be
	 * referenced by a device-tree node with extra data. In that case,
	 * copy the node to /reserved-memory/, unless it's already there.
	 *
	 * We update region->node to the new copy here, as the prd code may
	 * update regions' device-tree nodes, and we want those updates to
	 * apply to the nodes in /reserved-memory/.
	 */
	if (region->type == REGION_FW_RESERVED && region->node) {
		if (region->node->parent != parent)
			region->node = dt_copy(region->node, parent);
		return;
	}

	name = strdup(region->name);
	assert(name);

	/* remove any cell addresses in the region name; we have our own cell
	 * addresses here */
	p = strchr(name, '@');
	if (p)
		*p = '\0';

	region->node = dt_new_addr(parent, name, region->start);
	assert(region->node);
	dt_add_property_u64s(region->node, "reg", region->start, region->len);

	/*
	 * This memory is used by hardware and may need special handling. Ask
	 * the host kernel not to map it by default.
	 */
	if (region->type == REGION_RESERVED)
		dt_add_property(region->node, "no-map", NULL, 0);

	free(name);
}
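
/* The node created above ends up looking roughly like this (addresses
 * purely illustrative; "reg" is a u64 start/length pair, shown as four
 * cells under #address-cells = #size-cells = 2):
 *
 *	/reserved-memory/ibm,firmware-heap@30400000 {
 *		reg = <0x0 0x30400000 0x0 0xc00000>;
 *	};
 *
 * plus a "no-map" property when the region type is REGION_RESERVED.
 */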

void mem_region_add_dt_reserved(void)
{
	int names_len, ranges_len, len;
	const struct dt_property *prop;
	struct mem_region *region;
	void *names, *ranges;
	struct dt_node *node;
	uint64_t *range;
	char *name;

	names_len = 0;
	ranges_len = 0;

	/* Finalise the region list, so we know that the regions list won't be
	 * altered after this point. The regions' free lists may change after
	 * we drop the lock, but we don't access those. */
	lock(&mem_region_lock);
	mem_regions_finalised = true;

	/* establish top-level reservation node */
	node = dt_find_by_path(dt_root, "reserved-memory");
	if (!node) {
		node = dt_new(dt_root, "reserved-memory");
		dt_add_property_cells(node, "#address-cells", 2);
		dt_add_property_cells(node, "#size-cells", 2);
		dt_add_property(node, "ranges", NULL, 0);
	}

	prlog(PR_INFO, "Reserved regions:\n");

	/* First pass, create /reserved-memory/ nodes for each reservation,
	 * and calculate the length for the /reserved-names and
	 * /reserved-ranges properties */
	list_for_each(&regions, region, list) {
		if (!region_is_reservable(region))
			continue;

		prlog(PR_INFO, "  0x%012llx..%012llx : %s\n",
		      (long long)region->start,
		      (long long)(region->start + region->len - 1),
		      region->name);

		mem_region_add_dt_reserved_node(node, region);

		/* calculate the size of the properties populated later */
		names_len += strlen(region->node->name) + 1;
		ranges_len += 2 * sizeof(uint64_t);
	}

	name = names = malloc(names_len);
	range = ranges = malloc(ranges_len);

	/* Second pass: populate the old-style reserved-names and
	 * reserved-regions arrays based on the node data */
	list_for_each(&regions, region, list) {
		if (!region_is_reservable(region))
			continue;

		len = strlen(region->node->name) + 1;
		memcpy(name, region->node->name, len);
		name += len;

		range[0] = cpu_to_fdt64(region->start);
		range[1] = cpu_to_fdt64(region->len);
		range += 2;
	}
	unlock(&mem_region_lock);

	prop = dt_find_property(dt_root, "reserved-names");
	if (prop)
		dt_del_property(dt_root, (struct dt_property *)prop);

	prop = dt_find_property(dt_root, "reserved-ranges");
	if (prop)
		dt_del_property(dt_root, (struct dt_property *)prop);

	dt_add_property(dt_root, "reserved-names", names, names_len);
	dt_add_property(dt_root, "reserved-ranges", ranges, ranges_len);

	free(names);
	free(ranges);
}

struct mem_region *mem_region_next(struct mem_region *region)
{
	struct list_node *node;

	assert(lock_held_by_me(&mem_region_lock));

	node = region ? &region->list : &regions.n;

	if (node->next == &regions.n)
		return NULL;

	return list_entry(node->next, struct mem_region, list);
}