/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. pfn
 * walkers that rely on fully initialized page->flags (and other
 * struct page fields) should use this rather than pfn_valid &&
 * pfn_to_page.
 */
#define pfn_to_online_page(pfn)					\
({								\
	struct page *___page = NULL;				\
	unsigned long ___pfn = pfn;				\
	unsigned long ___nr = pfn_to_section_nr(___pfn);	\
								\
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
	    pfn_valid_within(___pfn))				\
		___page = pfn_to_page(___pfn);			\
	___page;						\
})
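/*
 * Example (an illustrative sketch, not part of this header): a pfn walker
 * that must only touch fully initialized, online pages. The helper name
 * count_online_pages() is hypothetical:
 *
 *	static unsigned long count_online_pages(unsigned long start_pfn,
 *						unsigned long end_pfn)
 *	{
 *		unsigned long pfn, count = 0;
 *
 *		for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *			if (pfn_to_online_page(pfn))
 *				count++;
 *		return count;
 *	}
 */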

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types to control the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};
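/*
 * Example (sketch): the sysfs "state" attribute of a memory block maps
 * user strings onto these values, roughly as drivers/base/memory.c does:
 *
 *	if (sysfs_streq(buf, "online_kernel"))
 *		online_type = MMOP_ONLINE_KERNEL;
 *	else if (sysfs_streq(buf, "online_movable"))
 *		online_type = MMOP_ONLINE_MOVABLE;
 *	else if (sysfs_streq(buf, "online"))
 *		online_type = MMOP_ONLINE_KEEP;
 *	else if (sysfs_streq(buf, "offline"))
 *		online_type = MMOP_OFFLINE;
 */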

/*
 * Restrictions for memory hotplug:
 * flags:  MHP_ flags
 * altmap: alternative allocator for the memmap array
 */
struct mhp_restrictions {
	unsigned long flags;
	struct vmem_altmap *altmap;
};
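/*
 * Example (sketch): a caller that wants the memmap of the hot-added range
 * to be allocated from the range itself passes an altmap; a plain caller
 * leaves the structure zeroed:
 *
 *	struct mhp_restrictions restrictions = {
 *		.altmap = altmap,	(NULL for a normally allocated memmap)
 *	};
 *
 *	rc = arch_add_memory(nid, start, size, &restrictions);
 */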

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone cannot
 * change while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
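/*
 * Example (sketch): readers of the zone span use the usual seqlock retry
 * loop, in the style of mm/page_alloc.c:
 *
 *	unsigned seq;
 *	unsigned long start_pfn, spanned;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		spanned = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */
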
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
	unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
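/*
 * Example (sketch): a balloon driver can intercept pages as they come
 * online, in the style of the Hyper-V balloon driver; my_online_page()
 * is a hypothetical name:
 *
 *	static void my_online_page(struct page *pg, unsigned int order)
 *	{
 *		either hand the page back via
 *		generic_online_page(pg, order), or hold it for the balloon
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */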

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions);
extern u64 max_mem_size;

extern bool memhp_auto_online;
/* true if the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(int nid, u64 start, u64 size,
			       struct vmem_altmap *altmap);
extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
			   unsigned long nr_pages, struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_restrictions *restrictions);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_restrictions *restrictions)
{
	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_restrictions *restrictions);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only used on the error path of node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate a pgdat.
 * XXX: kmalloc_node() cannot allocate from the new node's memory at this
 * point, because the pgdat for the new node has not been allocated and
 * initialized yet. Using the new node's own memory needs more work.
 */
#define generic_alloc_nodedata(nid)			\
({							\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);		\
})
/*
 * This definition is only for the error path of node hot-add.
 * Node hot-remove would have to replace it.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
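/*
 * Example (sketch of the node hot-add pattern described above, loosely
 * following hotadd_new_pgdat() in mm/memory_hotplug.c):
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return NULL;
 *	arch_refresh_nodedata(nid, pgdat);	(NODE_DATA(nid) is now valid)
 *	...
 *	and on a later failure:
 *	arch_free_nodedata(pgdat);
 */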

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
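/*
 * Example (sketch): code that walks pfns and must not race with memory
 * going offline brackets the walk with the hotplug read-side lock:
 *
 *	get_online_mems();
 *	... walk the range; pages cannot be offlined under us ...
 *	put_online_mems();
 *
 * Writers (the hotplug paths themselves) use mem_hotplug_begin() and
 * mem_hotplug_done() instead.
 */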

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
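/*
 * Example (sketch): resizing a node's span saves and restores IRQ flags
 * through the lock helpers above:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	... update pgdat->node_start_pfn / pgdat->node_spanned_pages ...
 *	pgdat_resize_unlock(pgdat, &flags);
 */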

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline int remove_memory(int nid, u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
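/*
 * Example (sketch): a hot-remove first offlines the range and only then
 * removes it; remove_memory() fails with -EBUSY if part of the range is
 * still online, while __remove_memory() BUGs in that case:
 *
 *	rc = offline_pages(PFN_DOWN(start), size >> PAGE_SHIFT);
 *	if (!rc)
 *		rc = remove_memory(nid, start, size);
 */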

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
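/*
 * Example (sketch): a driver exposing a device-backed range as system RAM,
 * in the style of ACPI memory hotplug or dax/kmem:
 *
 *	nid = memory_add_physaddr_to_nid(start);
 *	rc = add_memory(nid, start, size);
 *
 * add_memory() takes the device hotplug lock itself; __add_memory() expects
 * the caller to already hold it.
 */
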
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */