/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 * This is only a hint; the core kernel can decide not to do this based on
 * different alignment checks.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: corresponding dev_pagemap when hot-adding ZONE_DEVICE memory
 *	(optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
bool mhp_supports_memmap_on_memory(void);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone should have pgdat_resize_lock() and
 * zone_span_writelock() both held. This ensures the size of a zone
 * can't be changed while pgdat_resize_lock() is held.
 */
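/*
 * Illustrative sketch (editorial, not upstream documentation): a lockless
 * reader of the zone span would pair the seqlock helpers below as follows,
 * retrying whenever a concurrent resize raced with the read:
 *
 *	unsigned seq;
 *	unsigned long start_pfn, nr;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */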
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
					      unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* If the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}
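
/*
 * Illustrative sketch (editorial, not part of this header's API contract):
 * a driver that wants to intercept pages as they come online, e.g. a
 * balloon driver, might wrap generic_online_page() with its own callback;
 * my_online_page() is a hypothetical handler:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		... claim or account the page range here ...
 *		generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */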
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline bool mhp_supports_memmap_on_memory(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
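
/*
 * Illustrative sketch (editorial; hypothetical caller of the declarations
 * below): a device driver exposing its memory as System RAM, in the style
 * of dax/kmem or virtio-mem, might add a range with:
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (example)", MHP_NONE);
 *	if (rc)
 *		return rc;
 */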
#ifdef CONFIG_MEMORY_HOTPLUG
/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_get_default_online_type(void);
extern void mhp_set_default_online_type(int online_type);
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype,
				   bool isolate_pageblock);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
			      unsigned long nr_pages, struct vmem_altmap *altmap,
			      struct dev_pagemap *pgmap);
extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
				  struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
				       struct memory_group *group,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */
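
/*
 * Illustrative sketch (editorial; hypothetical teardown path, assuming
 * CONFIG_MEMORY_HOTREMOVE): a driver removing memory it added earlier
 * would use offline_and_remove_memory(), which can legitimately fail
 * with -EBUSY while pages in the range cannot be isolated or offlined:
 *
 *	rc = offline_and_remove_memory(start, size);
 *	if (rc == -EBUSY)
 *		... retry later ...
 */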