/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on fully initialized page->flags (and other struct
 * page fields) should use this rather than pfn_valid() && pfn_to_page().
 */
#define pfn_to_online_page(pfn)					    \
({								    \
	struct page *___page = NULL;				    \
	unsigned long ___pfn = pfn;				    \
	unsigned long ___nr = pfn_to_section_nr(___pfn);	    \
								    \
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))				    \
		___page = pfn_to_page(___pfn);			    \
	___page;						    \
})
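
/*
 * Usage example (editor's illustrative sketch, not part of this header):
 * a minimal pfn walker built on pfn_to_online_page(), so struct pages in
 * offline (and therefore possibly uninitialized) sections are never
 * dereferenced. The helper name count_reserved_pages() is hypothetical.
 *
 *	static unsigned long count_reserved_pages(unsigned long start_pfn,
 *						  unsigned long nr_pages)
 *	{
 *		unsigned long pfn, nr = 0;
 *
 *		for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
 *			struct page *page = pfn_to_online_page(pfn);
 *
 *			if (page && PageReserved(page))
 *				nr++;
 *		}
 *		return nr;
 *	}
 */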

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
};

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
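
/*
 * Usage example (editor's illustrative sketch, not part of this header):
 * how the span seqlock helpers above are meant to be paired. A reader
 * retries if a concurrent resize raced with it; the resize path takes
 * pgdat_resize_lock() plus zone_span_writelock() as required by the note
 * above. Variable names are placeholders.
 *
 *	unsigned long start_pfn, nr_pages;
 *	unsigned int seq;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 *
 *	...and on the resize (writer) side:
 *
 *	pgdat_resize_lock(zone->zone_pgdat, &flags);
 *	zone_span_writelock(zone);
 *	...update zone->zone_start_pfn and zone->spanned_pages...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 */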
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			int online_type, int nid);
extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
					 unsigned long end_pfn);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
					      unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int memhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int memhp_default_online_type;
/* true if the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(int nid, u64 start, u64 size,
			       struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only used on the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate the pgdat.
 * XXX: kmalloc_node() cannot allocate from the new node's memory at this point,
 *	because the pgdat for the new node is not allocated/initialized yet.
 *	Using the new node's own memory will need more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path of node hot-add.
 * For node hot-remove, it will have to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
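
/*
 * Usage example (editor's illustrative sketch, not part of this header):
 * readers that want the set of online sections to stay stable while they
 * walk a pfn range pin it with get_online_mems()/put_online_mems(); the
 * hotplug operations themselves serialize via mem_hotplug_begin()/
 * mem_hotplug_done(). Variable names are placeholders.
 *
 *	unsigned long pfn, nr_online = 0;
 *
 *	get_online_mems();
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *		if (pfn_to_online_page(pfn))
 *			nr_online++;
 *	put_online_mems();
 */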

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
extern int offline_and_remove_memory(int nid, u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn,
		unsigned long nr_pages, int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned start_pfn, unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
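
/*
 * Usage example (editor's illustrative sketch, not part of this header):
 * the typical driver-side sequence for the hot(un)plug entry points
 * declared above. "nid", "start" and "size" are placeholders; size is
 * normally aligned to the memory block size, and error handling beyond
 * the return codes shown is the caller's responsibility.
 *
 *	rc = add_memory(nid, start, size);
 *	if (rc)
 *		return rc;
 *
 *	...
 *
 *	rc = offline_and_remove_memory(nid, start, size);
 */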