include/linux/memory_hotplug.h at v4.17-rc3
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on fully initialized page->flags (and friends)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)					\
({								\
	struct page *___page = NULL;				\
	unsigned long ___nr = pfn_to_section_nr(pfn);		\
								\
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
		___page = pfn_to_page(pfn);			\
	___page;						\
})

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures that the size of a zone can't
 * be changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
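/*
 * Example (editor's sketch, not part of the original header): a reader
 * that wants a consistent snapshot of a zone's span retries around
 * concurrent resizers using the helpers above:
 *
 *	unsigned long start_pfn, nr_pages;
 *	unsigned seq;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 *
 * Writers in the hotplug path wrap span updates in zone_span_writelock()/
 * zone_span_writeunlock(), with pgdat_resize_lock() already held.
 */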
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
	int online_type);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long start_pfn,
	unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);

extern int try_online_node(int nid);

extern bool memhp_auto_online;
/* If the movable_node boot option is specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size,
		struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch uses the generic style NODE_DATA(), setting
 * node_data[nid] = kzalloc() works well, but this depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only used on the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * With CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=n, this function is used to
 * allocate the pgdat. XXX: kmalloc_node() cannot be used here, because
 * the pgdat for the new node is itself not allocated/initialized yet,
 * so the new node's own memory cannot be used; making that work will
 * need more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path of node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
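/*
 * Example (editor's sketch, not part of the original header): code that
 * walks pfns and cannot tolerate concurrent hotplug brackets the walk
 * with the reader-side pair above:
 *
 *	unsigned long pfn;
 *
 *	get_online_mems();
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		... inspect the online page ...
 *	}
 *	put_online_mems();
 *
 * mem_hotplug_begin()/mem_hotplug_done() are the writer-side counterparts
 * used by the hotplug core itself.
 */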
#else /* !CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
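/*
 * Example (editor's sketch, not part of the original header): resizing a
 * node's span takes the IRQ-safe resize lock, with the flags word passed
 * by address:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	... update pgdat->node_start_pfn / pgdat->node_spanned_pages ...
 *	pgdat_resize_unlock(pgdat, &flags);
 */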
#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
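/*
 * Example (editor's sketch appended after the header guard; not part of
 * the original header): a typical consumer of walk_memory_range() supplies
 * a callback that is invoked once per memory_block in the pfn range, the
 * way add_memory_resource() onlines newly added blocks when
 * memhp_auto_online is set:
 *
 *	static int online_memory_block(struct memory_block *mem, void *arg)
 *	{
 *		return device_online(&mem->dev);
 *	}
 *
 *	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
 *			  NULL, online_memory_block);
 */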