/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: true if memory is allocated in the bottom-up direction
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;		/* is allocation bottom-up? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif
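/*
 * Illustrative sketch (not part of this header): early architecture setup
 * code typically registers the RAM it discovers with memblock and then
 * reserves the ranges that must never be handed out. The symbols base,
 * size, kernel_start and kernel_size below are hypothetical placeholders:
 *
 *	memblock_add(base, size);			// make RAM known
 *	memblock_reserve(kernel_start, kernel_size);	// keep the kernel image
 */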
#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or through just type_a if type_b is %NULL
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
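/*
 * Illustrative sketch (not part of this header): walking the ranges of
 * memblock.memory that do not overlap memblock.reserved, i.e. the free
 * early memory, and printing each range:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 */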
/**
 * for_each_mem_range_rev - reverse-iterate through memblock areas from
 * type_a that are not included in type_b, or through just type_a if
 * type_b is %NULL
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
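/*
 * Illustrative sketch (not part of this header): summing the pages present
 * on one node with for_each_mem_pfn_range(); "nid" and "node_pages" are
 * hypothetical locals:
 *
 *	unsigned long start_pfn, end_pfn, node_pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *		node_pages += end_pfn - start_pfn;
 */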
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine the NUMA node, and whether a
 * given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
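/*
 * Illustrative sketch (not part of this header): totalling the free early
 * memory before the page allocator is up:
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */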
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}
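/*
 * Illustrative sketch (not part of this header): the common early boot
 * allocation pattern. memblock_alloc() returns zeroed memory or NULL, so
 * callers are expected to check the result; "table" and "nr_entries" are
 * hypothetical:
 *
 *	table = memblock_alloc(nr_entries * sizeof(*table), SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate table\n", __func__);
 */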
static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
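/*
 * Illustrative sketch (not part of this header): while only part of the
 * address space is mapped early in boot, architecture code can cap where
 * memblock allocates from and lift the cap once the full direct map is
 * up; "early_map_end" is a hypothetical bound:
 *
 *	memblock_set_current_limit(early_map_end);
 *	// ... set up the complete kernel mapping ...
 *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 */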
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */