// Internal dynamic memory allocations.
//
// Copyright (C) 2009-2013 Kevin O'Connor <kevin@koconnor.net>
//
// This file may be distributed under the terms of the GNU LGPLv3 license.

#include "biosvar.h" // GET_BDA
#include "config.h" // BUILD_BIOS_ADDR
#include "e820map.h" // struct e820entry
#include "list.h" // hlist_node
#include "malloc.h" // _malloc
#include "memmap.h" // PAGE_SIZE
#include "output.h" // dprintf
#include "stacks.h" // wait_preempt
#include "std/optionrom.h" // OPTION_ROM_ALIGN
#include "string.h" // memset

// Information on a reserved area.
struct allocinfo_s {
    struct hlist_node node;
    u32 range_start, range_end, alloc_size;
};
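
// A zone's entries are kept sorted from the highest address down to the
// lowest.  Each entry covers the bytes from 'range_start' to 'range_end';
// the first 'alloc_size' bytes of the range are in use and the remainder
// is free space that later allocations may be carved from.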

// Information on a tracked memory allocation.
struct allocdetail_s {
    struct allocinfo_s detailinfo;
    struct allocinfo_s datainfo;
    u32 handle;
};

// The various memory zones.
struct zone_s {
    struct hlist_head head;
};

struct zone_s ZoneLow VARVERIFY32INIT, ZoneHigh VARVERIFY32INIT;
struct zone_s ZoneFSeg VARVERIFY32INIT;
struct zone_s ZoneTmpLow VARVERIFY32INIT, ZoneTmpHigh VARVERIFY32INIT;

static struct zone_s *Zones[] VARVERIFY32INIT = {
    &ZoneTmpLow, &ZoneLow, &ZoneFSeg, &ZoneTmpHigh, &ZoneHigh
};


/****************************************************************
 * low-level memory reservations
 ****************************************************************/
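
// alloc_new() carves an allocation out of the high end of the first free
// range large enough to hold it, alloc_add() donates a new address range
// to a zone, and alloc_free() returns an allocation's space to the
// adjacent lower range.  The bookkeeping for added ranges is itself
// allocated out of the temporary zones via alloc_new_detail().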

// Find and reserve space from a given zone
static u32
alloc_new(struct zone_s *zone, u32 size, u32 align, struct allocinfo_s *fill)
{
    struct allocinfo_s *info;
    hlist_for_each_entry(info, &zone->head, node) {
        u32 alloc_end = info->range_start + info->alloc_size;
        u32 range_end = info->range_end;
        u32 new_range_end = ALIGN_DOWN(range_end - size, align);
        if (new_range_end >= alloc_end && new_range_end <= range_end) {
            // Found space - now reserve it.
            fill->range_start = new_range_end;
            fill->range_end = range_end;
            fill->alloc_size = size;

            info->range_end = new_range_end;
            hlist_add_before(&fill->node, &info->node);
            return new_range_end;
        }
    }
    return 0;
}

// Reserve space for a 'struct allocdetail_s' and fill
static struct allocdetail_s *
alloc_new_detail(struct allocdetail_s *temp)
{
    u32 detail_addr = alloc_new(&ZoneTmpHigh, sizeof(struct allocdetail_s)
                                , MALLOC_MIN_ALIGN, &temp->detailinfo);
    if (!detail_addr) {
        detail_addr = alloc_new(&ZoneTmpLow, sizeof(struct allocdetail_s)
                                , MALLOC_MIN_ALIGN, &temp->detailinfo);
        if (!detail_addr) {
            warn_noalloc();
            return NULL;
        }
    }
    struct allocdetail_s *detail = memremap(detail_addr, sizeof(*detail));

    // Fill final 'detail' allocation from data in 'temp'
    memcpy(detail, temp, sizeof(*detail));
    hlist_replace(&temp->detailinfo.node, &detail->detailinfo.node);
    hlist_replace(&temp->datainfo.node, &detail->datainfo.node);
    return detail;
}

// Add new memory to a zone
static void
alloc_add(struct zone_s *zone, u32 start, u32 end)
{
    // Find position to add space
    struct allocinfo_s *info;
    struct hlist_node **pprev;
    hlist_for_each_entry_pprev(info, pprev, &zone->head, node) {
        if (info->range_start < start)
            break;
    }

    // Add space using temporary allocation info.
    struct allocdetail_s tempdetail;
    tempdetail.handle = MALLOC_DEFAULT_HANDLE;
    tempdetail.datainfo.range_start = start;
    tempdetail.datainfo.range_end = end;
    tempdetail.datainfo.alloc_size = 0;
    hlist_add(&tempdetail.datainfo.node, pprev);

    // Allocate final allocation info.
    struct allocdetail_s *detail = alloc_new_detail(&tempdetail);
    if (!detail)
        hlist_del(&tempdetail.datainfo.node);
}

// Release space allocated with alloc_new()
static void
alloc_free(struct allocinfo_s *info)
{
    struct allocinfo_s *next = container_of_or_null(
        info->node.next, struct allocinfo_s, node);
    if (next && next->range_end == info->range_start)
        next->range_end = info->range_end;
    hlist_del(&info->node);
}

// Search all zones for an allocation obtained from alloc_new()
static struct allocinfo_s *
alloc_find(u32 data)
{
    int i;
    for (i=0; i<ARRAY_SIZE(Zones); i++) {
        struct allocinfo_s *info;
        hlist_for_each_entry(info, &Zones[i]->head, node) {
            if (info->range_start == data)
                return info;
        }
    }
    return NULL;
}

// Find the lowest memory range added by alloc_add()
static struct allocinfo_s *
alloc_find_lowest(struct zone_s *zone)
{
    struct allocinfo_s *info, *last = NULL;
    hlist_for_each_entry(info, &zone->head, node) {
        last = info;
    }
    return last;
}


/****************************************************************
 * ebda movement
 ****************************************************************/
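
// The EBDA (Extended BIOS Data Area) normally sits at the very top of
// conventional memory.  relocate_ebda() copies it to a lower address and
// updates the BDA fields (mem_size_kb and ebda_seg) so that callers can
// still locate it.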

// Move ebda
static int
relocate_ebda(u32 newebda, u32 oldebda, u8 ebda_size)
{
    u32 lowram = GET_BDA(mem_size_kb) * 1024;
    if (oldebda != lowram)
        // EBDA isn't at end of ram - give up.
        return -1;

    // Do copy
    memmove((void*)newebda, (void*)oldebda, ebda_size * 1024);

    // Update indexes
    dprintf(1, "ebda moved from %x to %x\n", oldebda, newebda);
    SET_BDA(mem_size_kb, newebda / 1024);
    SET_BDA(ebda_seg, FLATPTR_TO_SEG(newebda));
    return 0;
}

// Support expanding the ZoneLow dynamically.
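// When the zone runs out of room the EBDA is relocated downward (in 1KiB
// steps) and the bytes it vacates are handed to ZoneLow, either by
// extending the zone's lowest range or by adding a new range via
// alloc_add().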
static u32
zonelow_expand(u32 size, u32 align, struct allocinfo_s *fill)
{
    // Make sure to not move ebda while an optionrom is running.
    if (unlikely(wait_preempt())) {
        u32 data = alloc_new(&ZoneLow, size, align, fill);
        if (data)
            return data;
    }

    struct allocinfo_s *info = alloc_find_lowest(&ZoneLow);
    if (!info)
        return 0;
    u32 oldpos = info->range_end;
    u32 newpos = ALIGN_DOWN(oldpos - size, align);
    u32 bottom = info->range_start + info->alloc_size;
    if (newpos >= bottom && newpos <= oldpos)
        // Space already present.
        return alloc_new(&ZoneLow, size, align, fill);
    u16 ebda_seg = get_ebda_seg();
    u32 ebda_pos = (u32)MAKE_FLATPTR(ebda_seg, 0);
    u8 ebda_size = GET_EBDA(ebda_seg, size);
    u32 ebda_end = ebda_pos + ebda_size * 1024;
    if (ebda_end != bottom)
        // Something else is after ebda - can't use any existing space.
        newpos = ALIGN_DOWN(ebda_end - size, align);
    u32 newbottom = ALIGN_DOWN(newpos, 1024);
    u32 newebda = ALIGN_DOWN(newbottom - ebda_size * 1024, 1024);
    if (newebda < BUILD_EBDA_MINIMUM)
        // Not enough space.
        return 0;

    // Move ebda
    int ret = relocate_ebda(newebda, ebda_pos, ebda_size);
    if (ret)
        return 0;

    // Update zone
    if (ebda_end == bottom)
        info->range_start = newbottom;
    else
        alloc_add(&ZoneLow, newbottom, ebda_end);

    return alloc_new(&ZoneLow, size, align, fill);
}


/****************************************************************
 * tracked memory allocations
 ****************************************************************/
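
// Every successful allocation reserves two things: the data block itself
// (in the caller's requested zone) and a 'struct allocdetail_s' in one of
// the temporary zones.  The detail record tracks the data block and its
// PMM handle so the allocation can later be freed or looked up by handle.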

// Allocate physical memory from the given zone and track it as a PMM allocation
unsigned long
malloc_palloc(struct zone_s *zone, u32 size, u32 align)
{
    ASSERT32FLAT();
    if (!size)
        return 0;

    // Find and reserve space for main allocation
    struct allocdetail_s tempdetail;
    tempdetail.handle = MALLOC_DEFAULT_HANDLE;
    u32 data = alloc_new(zone, size, align, &tempdetail.datainfo);
    if (!CONFIG_MALLOC_UPPERMEMORY && !data && zone == &ZoneLow)
        data = zonelow_expand(size, align, &tempdetail.datainfo);
    if (!data)
        return 0;

    // Find and reserve space for bookkeeping.
    struct allocdetail_s *detail = alloc_new_detail(&tempdetail);
    if (!detail) {
        alloc_free(&tempdetail.datainfo);
        return 0;
    }

    dprintf(8, "phys_alloc zone=%p size=%d align=%x ret=%x (detail=%p)\n"
            , zone, size, align, data, detail);

    return data;
}

// Allocate virtual memory from the given zone
void * __malloc
x86_malloc(struct zone_s *zone, u32 size, u32 align)
{
    return memremap(malloc_palloc(zone, size, align), size);
}

// Free a data block allocated with phys_alloc
int
malloc_pfree(u32 data)
{
    ASSERT32FLAT();
    struct allocinfo_s *info = alloc_find(data);
    if (!info || data == virt_to_phys(info) || !info->alloc_size)
        return -1;
    struct allocdetail_s *detail = container_of(
        info, struct allocdetail_s, datainfo);
    dprintf(8, "phys_free %x (detail=%p)\n", data, detail);
    alloc_free(info);
    alloc_free(&detail->detailinfo);
    return 0;
}
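
// Free memory allocated with x86_malloc()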
void
free(void *data)
{
    if (!data)
        return;
    int ret = malloc_pfree(virt_to_phys(data));
    if (ret)
        warn_internalerror();
}

// Find the amount of free space in a given zone.
u32
malloc_getspace(struct zone_s *zone)
{
    // XXX - doesn't account for ZoneLow being able to grow.
    // XXX - results not reliable when CONFIG_THREAD_OPTIONROMS
    u32 maxspace = 0;
    struct allocinfo_s *info;
    hlist_for_each_entry(info, &zone->head, node) {
        u32 space = info->range_end - info->range_start - info->alloc_size;
        if (space > maxspace)
            maxspace = space;
    }

    if (zone != &ZoneTmpHigh && zone != &ZoneTmpLow)
        return maxspace;
    // Account for space needed for PMM tracking.
    u32 reserve = ALIGN(sizeof(struct allocdetail_s), MALLOC_MIN_ALIGN);
    if (maxspace <= reserve)
        return 0;
    return maxspace - reserve;
}
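
// An allocation may be tagged with a 32bit handle so that it can be
// located again later (see malloc_findhandle below); the handle is stored
// in the allocation's allocdetail_s record.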
// Set a handle associated with an allocation.
void
malloc_sethandle(u32 data, u32 handle)
{
    ASSERT32FLAT();
    struct allocinfo_s *info = alloc_find(data);
    if (!info || data == virt_to_phys(info) || !info->alloc_size)
        return;
    struct allocdetail_s *detail = container_of(
        info, struct allocdetail_s, datainfo);
    detail->handle = handle;
}

// Find the data block allocated with phys_alloc with a given handle.
u32
malloc_findhandle(u32 handle)
{
    int i;
    for (i=0; i<ARRAY_SIZE(Zones); i++) {
        struct allocinfo_s *info;
        hlist_for_each_entry(info, &Zones[i]->head, node) {
            if (info->range_start != virt_to_phys(info))
                continue;
            struct allocdetail_s *detail = container_of(
                info, struct allocdetail_s, detailinfo);
            if (detail->handle == handle)
                return detail->datainfo.range_start;
        }
    }
    return 0;
}


/****************************************************************
 * 0xc0000-0xf0000 management
 ****************************************************************/
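
// Option roms are deployed upward starting at BUILD_ROM_START while (when
// CONFIG_MALLOC_UPPERMEMORY is enabled) ZoneLow allocations are carved
// downward out of the same region.  rom_get_max() reports the current
// boundary that deployed roms must stay below.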

static u32 RomEnd = BUILD_ROM_START;
static struct allocinfo_s *RomBase;

#define OPROM_HEADER_RESERVE 16

// Return the maximum memory position option roms may use.
u32
rom_get_max(void)
{
    if (CONFIG_MALLOC_UPPERMEMORY)
        return ALIGN_DOWN(RomBase->range_end - OPROM_HEADER_RESERVE
                          , OPTION_ROM_ALIGN);
    return SYMBOL(final_readonly_start);
}

// Return the end of the last deployed option rom.
u32
rom_get_last(void)
{
    return RomEnd;
}

// Request space for an optionrom in 0xc0000-0xf0000 area.
struct rom_header *
rom_reserve(u32 size)
{
    u32 newend = ALIGN(RomEnd + size, OPTION_ROM_ALIGN);
    if (newend > rom_get_max())
        return NULL;
    if (CONFIG_MALLOC_UPPERMEMORY) {
        if (newend < SYMBOL(zonelow_base))
            newend = SYMBOL(zonelow_base);
        RomBase->range_start = newend + OPROM_HEADER_RESERVE;
    }
    return (void*)RomEnd;
}

// Confirm space as in use by an optionrom.
int
rom_confirm(u32 size)
{
    void *new = rom_reserve(size);
    if (!new) {
        warn_noalloc();
        return -1;
    }
    RomEnd = ALIGN(RomEnd + size, OPTION_ROM_ALIGN);
    return 0;
}


/****************************************************************
 * Setup
 ****************************************************************/
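
// Zone setup happens in stages: malloc_preinit() builds the temporary
// zones and ZoneHigh from the e820 map early in POST, malloc_init()
// populates ZoneLow and ZoneFSeg once the init code has been relocated,
// and malloc_prepboot() finalizes the zones just before boot.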

void
malloc_preinit(void)
{
    ASSERT32FLAT();
    dprintf(3, "malloc preinit\n");

    // Don't declare any memory between 0xa0000 and 0x100000
    e820_remove(BUILD_LOWRAM_END, BUILD_BIOS_ADDR-BUILD_LOWRAM_END);

    // Mark known areas as reserved.
    e820_add(BUILD_BIOS_ADDR, BUILD_BIOS_SIZE, E820_RESERVED);

    // Populate temp high ram
    u32 highram = 0;
    int i;
    for (i=e820_count-1; i>=0; i--) {
        struct e820entry *en = &e820_list[i];
        u64 end = en->start + en->size;
        if (end < 1024*1024)
            break;
        if (en->type != E820_RAM || end > 0xffffffff)
            continue;
        u32 s = en->start, e = end;
        if (!highram) {
            u32 newe = ALIGN_DOWN(e - BUILD_MAX_HIGHTABLE, MALLOC_MIN_ALIGN);
            if (newe <= e && newe >= s) {
                highram = newe;
                e = newe;
            }
        }
        alloc_add(&ZoneTmpHigh, s, e);
    }

    // Populate regions
    alloc_add(&ZoneTmpLow, BUILD_STACK_ADDR, BUILD_EBDA_MINIMUM);
    if (highram) {
        alloc_add(&ZoneHigh, highram, highram + BUILD_MAX_HIGHTABLE);
        e820_add(highram, BUILD_MAX_HIGHTABLE, E820_RESERVED);
    }
}
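
// Populate the zones from the PMM regions handed over by the UEFI
// firmware when SeaBIOS runs as a Compatibility Support Module (CSM).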
void
malloc_csm_preinit(u32 low_pmm, u32 low_pmm_size, u32 hi_pmm, u32 hi_pmm_size)
{
    ASSERT32FLAT();

    if (hi_pmm_size > BUILD_MAX_HIGHTABLE) {
        u32 hi_pmm_end = hi_pmm + hi_pmm_size;
        alloc_add(&ZoneTmpHigh, hi_pmm, hi_pmm_end - BUILD_MAX_HIGHTABLE);
        alloc_add(&ZoneHigh, hi_pmm_end - BUILD_MAX_HIGHTABLE, hi_pmm_end);
    } else {
        alloc_add(&ZoneTmpHigh, hi_pmm, hi_pmm + hi_pmm_size);
    }
    alloc_add(&ZoneTmpLow, low_pmm, low_pmm + low_pmm_size);
}

u32 LegacyRamSize VARFSEG;

// Calculate the maximum ramsize (less than 4gig) from e820 map.
static void
calcRamSize(void)
{
    u32 rs = 0;
    int i;
    for (i=e820_count-1; i>=0; i--) {
        struct e820entry *en = &e820_list[i];
        u64 end = en->start + en->size;
        u32 type = en->type;
        if (end <= 0xffffffff && (type == E820_ACPI || type == E820_RAM)) {
            rs = end;
            break;
        }
    }
    LegacyRamSize = rs >= 1024*1024 ? rs : 1024*1024;
}

// Update pointers after code relocation.
void
malloc_init(void)
{
    ASSERT32FLAT();
    dprintf(3, "malloc init\n");

    if (CONFIG_RELOCATE_INIT) {
        // Fixup malloc pointers after relocation
        int i;
        for (i=0; i<ARRAY_SIZE(Zones); i++) {
            struct zone_s *zone = Zones[i];
            if (zone->head.first)
                zone->head.first->pprev = &zone->head.first;
        }
    }

    // Initialize low-memory region
    memmove(VSYMBOL(final_varlow_start), VSYMBOL(varlow_start)
            , SYMBOL(varlow_end) - SYMBOL(varlow_start));
    if (CONFIG_MALLOC_UPPERMEMORY) {
        alloc_add(&ZoneLow, SYMBOL(zonelow_base) + OPROM_HEADER_RESERVE
                  , SYMBOL(final_varlow_start));
        RomBase = alloc_find_lowest(&ZoneLow);
    } else {
        alloc_add(&ZoneLow, ALIGN_DOWN(SYMBOL(final_varlow_start), 1024)
                  , SYMBOL(final_varlow_start));
    }

    // Add space available in f-segment to ZoneFSeg
    memset(VSYMBOL(zonefseg_start), 0
           , SYMBOL(zonefseg_end) - SYMBOL(zonefseg_start));
    alloc_add(&ZoneFSeg, SYMBOL(zonefseg_start), SYMBOL(zonefseg_end));

    calcRamSize();
}
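
// Final zone cleanup before boot: zero the unused option rom area (and,
// with CONFIG_MALLOC_UPPERMEMORY, cover the remaining low-memory
// allocations with a dummy option rom header), reserve the low memory
// above the conventional ram limit in the e820 map, clear unused
// f-segment space, and return unused ZoneHigh memory to the e820 map.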
void
malloc_prepboot(void)
{
    ASSERT32FLAT();
    dprintf(3, "malloc finalize\n");

    u32 base = rom_get_max();
    memset((void*)RomEnd, 0, base-RomEnd);
    if (CONFIG_MALLOC_UPPERMEMORY) {
        // Place an optionrom signature around used low mem area.
        struct rom_header *dummyrom = (void*)base;
        dummyrom->signature = OPTION_ROM_SIGNATURE;
        int size = (BUILD_BIOS_ADDR - base) / 512;
        dummyrom->size = (size > 255) ? 255 : size;
    }

    // Reserve more low-mem if needed.
    u32 endlow = GET_BDA(mem_size_kb)*1024;
    e820_add(endlow, BUILD_LOWRAM_END-endlow, E820_RESERVED);

    // Clear unused f-seg ram.
    struct allocinfo_s *info = alloc_find_lowest(&ZoneFSeg);
    u32 size = info->range_end - info->range_start;
    memset(memremap(info->range_start, size), 0, size);
    dprintf(1, "Space available for UMB: %x-%x, %x-%x\n"
            , RomEnd, base, info->range_start, info->range_end);

    // Give back unused high ram.
    info = alloc_find_lowest(&ZoneHigh);
    if (info) {
        u32 giveback = ALIGN_DOWN(info->range_end-info->range_start, PAGE_SIZE);
        e820_add(info->range_start, giveback, E820_RAM);
        dprintf(1, "Returned %d bytes of ZoneHigh\n", giveback);
    }

    calcRamSize();
}