/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers that rely on fully initialized page->flags (and other fields)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)					   \
({								   \
	struct page *___page = NULL;				   \
	unsigned long ___pfn = pfn;				   \
	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
								   \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))				   \
		___page = pfn_to_page(___pfn);			   \
	___page;						   \
})
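
/*
 * Example: a pfn walker that skips offline or otherwise unusable pages.
 * Illustrative sketch only (the pfn range variables are assumed to be
 * set up by the caller); it is not part of this interface.
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		// page->flags is fully initialized here ...
 *	}
 */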

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};

/*
 * Restrictions for the memory hotplug:
 * flags:  MHP_ flags
 * altmap: alternative allocator for memmap array
 */
struct mhp_restrictions {
	unsigned long flags;
	struct vmem_altmap *altmap;
};

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
 * and zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			int online_type, int nid);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
					      unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_restrictions *restrictions);
extern u64 max_mem_size;

extern bool memhp_auto_online;
/* If the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(int nid, u64 start, u64 size,
			       struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_restrictions *restrictions);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_restrictions *restrictions)
{
	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_restrictions *restrictions);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif
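
/*
 * Example: a typical arch_add_memory() implementation maps the range
 * into the kernel page tables and then hands it to the generic code via
 * add_pages(). This is an illustrative sketch only; the mapping step is
 * architecture specific and elided here.
 *
 *	int arch_add_memory(int nid, u64 start, u64 size,
 *			    struct mhp_restrictions *restrictions)
 *	{
 *		unsigned long start_pfn = PFN_DOWN(start);
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		// ... create the linear mapping for [start, start + size) ...
 *		return add_pages(nid, start_pfn, nr_pages, restrictions);
 *	}
 */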

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For supporting node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * For now, arch_free_nodedata() is only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate
 * the pgdat.
 * XXX: kmalloc_node() cannot allocate from the new node's memory at
 * this point, because the pgdat for the new node is not yet
 * allocated/initialized itself. Using the new node's memory will
 * require more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path in node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
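
/*
 * Example: pinning memory hotplug state around a walk over struct pages
 * that must not be offlined concurrently. Illustrative sketch only; the
 * pfn range variables are assumed to be set up by the caller.
 *
 *	get_online_mems();
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		// page cannot be offlined until put_online_mems() ...
 *	}
 *	put_online_mems();
 */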

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					    unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn,
				   unsigned long nr_pages, int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */