/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If the arch has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation can't be used to obtain the new node's memory
 * at this time, because the pgdat for the new node is itself not yet
 * allocated/initialized. Using the new node's memory will require more
 * consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);	\
})

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
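/*
 * Illustrative sketch (editorial example, not part of this header): a
 * hypothetical driver hot-adding a discovered range, requesting a
 * self-contained memmap plus resource merging, and falling back when the
 * architecture can't place the memmap on the added range. `nid', `start'
 * and `size' are assumed inputs; add_memory() and
 * mhp_supports_memmap_on_memory() are declared later in this header.
 *
 *	mhp_t flags = MHP_MERGE_RESOURCE | MHP_MEMMAP_ON_MEMORY;
 *	int rc;
 *
 *	if (!mhp_supports_memmap_on_memory(size))
 *		flags &= ~MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory(nid, start, size, flags);
 *	if (rc)
 *		pr_err("memory hot-add failed: %d\n", rc);
 */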
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: dev_pagemap backing the hot-added range, if any (optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);
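/*
 * Illustrative reader-side sketch (editorial example) for the zone span
 * seqlock helpers declared earlier in this section; in-tree users live in
 * mm/. The loop retries if a writer resized the zone span concurrently,
 * so the reader never acts on a torn start/end pair:
 *
 *	unsigned int seq;
 *	bool in_zone;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		in_zone = pfn >= zone->zone_start_pfn &&
 *			  pfn < zone_end_pfn(zone);
 *	} while (zone_span_seqretry(zone, seq));
 */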
/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
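/*
 * Illustrative sketch (editorial example): pinning memory hotplug state
 * with get_online_mems()/put_online_mems() while walking a pfn range, so
 * that memory can't be offlined underneath the walker. Note both helpers
 * above degrade to no-ops without CONFIG_MEMORY_HOTPLUG. `start_pfn' and
 * `end_pfn' are assumed inputs.
 *
 *	unsigned long pfn;
 *
 *	get_online_mems();
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		... inspect page ...
 *	}
 *	put_online_mems();
 */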
/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
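/*
 * Illustrative sketch (editorial example, in the style of drivers/dax/kmem):
 * device-driven hot-add via add_memory_driver_managed(), with a
 * driver-owned resource name of the required "System RAM ($DRIVER)" form.
 * "example" is a hypothetical driver name; `nid', `start' and `size' are
 * assumed inputs. Teardown via offline_and_remove_memory() is best
 * effort and needs CONFIG_MEMORY_HOTREMOVE.
 *
 *	int rc = add_memory_driver_managed(nid, start, size,
 *					   "System RAM (example)",
 *					   MHP_NONE);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = offline_and_remove_memory(start, size);
 */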
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */