include/linux/memory_hotplug.h at v2.6.20-rc6
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

struct page;
struct zone;
struct pglist_data;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* Archs that don't support memory hotplug need stub defines for these. */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interfaces */
extern int online_pages(unsigned long start_pfn, unsigned long nr_pages);

/* reasonably generic interface for expanding the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If an arch has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but that depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * arch_free_nodedata() is currently only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=n, this function is used to
 * allocate a pgdat.
 * XXX: kmalloc_node() cannot be used here, because the pgdat for the new
 *	node has not been allocated/initialized yet.  Placing the pgdat on
 *	the new node's own memory needs more consideration.
 */
#define generic_alloc_nodedata(nid)			\
({							\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);		\
})
/*
 * This definition is only for the error path of node hot-add.
 * For node hot-remove it will have to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

#endif /* ! CONFIG_MEMORY_HOTPLUG */

static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages)
{
	printk(KERN_WARNING "%s() called, not yet supported\n", __FUNCTION__);
	dump_stack();
	return -ENOSYS;
}

extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);

#endif /* __LINUX_MEMORY_HOTPLUG_H */
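
The span seqlock wrappers above exist so that lockless readers can see a consistent snapshot of a zone's boundaries while a hot-add writer is resizing it. A minimal read-side sketch follows; the helper name is hypothetical, while zone_start_pfn and spanned_pages are the struct zone fields of this era.

#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

/*
 * Hypothetical reader: test whether a pfn lies within a zone's span,
 * retrying if a writer changes the span under us via zone_span_writelock().
 */
static int pfn_in_zone_span(struct zone *zone, unsigned long pfn)
{
	unsigned long start_pfn, end_pfn;
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		end_pfn = start_pfn + zone->spanned_pages;
	} while (zone_span_seqretry(zone, seq));

	return pfn >= start_pfn && pfn < end_pfn;
}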
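
On the write side, a resizer is expected to take the pgdat resize lock (an IRQ-safe spinlock) and the zone span writelock before touching the span fields. Below is a sketch of how a hot-add path might grow a zone, loosely modeled on the era's zone-growing code; the function name is hypothetical.

#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

/* Hypothetical writer: extend a zone to cover [start_pfn, start_pfn + nr_pages). */
static void grow_zone_span(struct pglist_data *pgdat, struct zone *zone,
			   unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long old_end, new_end = start_pfn + nr_pages;
	unsigned long flags;

	pgdat_resize_lock(pgdat, &flags);	/* serialize resizers of this node */
	zone_span_writelock(zone);		/* force span readers to retry */

	old_end = zone->zone_start_pfn + zone->spanned_pages;
	if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;
	if (zone->spanned_pages && old_end > new_end)
		new_end = old_end;
	zone->spanned_pages = new_end - zone->zone_start_pfn;

	zone_span_writeunlock(zone);
	pgdat_resize_unlock(pgdat, &flags);
}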
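
The arch_*_nodedata() hooks exist because bringing up an entirely new node first needs a pgdat which, per the XXX comment, cannot yet be placed on the new node's own memory. Here is a sketch of how a node hot-add path might use them, including the error path that arch_free_nodedata() is defined for; the function name is hypothetical and error handling is trimmed.

#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

/* Hypothetical node hot-add: allocate a pgdat for a new node and publish it. */
static pg_data_t *hotadd_pgdat(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = arch_alloc_nodedata(nid);	/* kzalloc() in the generic case */

	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);	/* make NODE_DATA(nid) resolve to it */

	if (arch_add_memory(nid, start, size)) {
		/* roll back: this is the error path arch_free_nodedata() serves */
		arch_refresh_nodedata(nid, NULL);
		arch_free_nodedata(pgdat);
		return NULL;
	}
	return pgdat;
}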
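
Putting the pieces together: the intended consumer of add_memory()/online_pages() is a firmware-driven path (ACPI memory hotplug, for example) that discovers a new physical range, maps it to a node, creates its struct pages, and finally hands the pages to the page allocator. In the real tree the onlining step is normally triggered through the memory sysfs interface; calling online_pages() directly below only illustrates the API, and the function name is hypothetical.

#include <linux/mm.h>
#include <linux/memory_hotplug.h>

/* Hypothetical firmware-driver path for hot-adding the range [start, start + size). */
static int hotadd_range(u64 start, u64 size)
{
	int nid = memory_add_physaddr_to_nid(start);
	int ret;

	ret = add_memory(nid, start, size);	/* allocates struct pages via arch_add_memory() */
	if (ret)
		return ret;

	/* normally done later via the "online" sysfs attribute; shown inline here */
	return online_pages(start >> PAGE_SHIFT, size >> PAGE_SHIFT);
}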