/* include/linux/memory_hotplug.h — as of v2.6.16-rc6 */
1#ifndef __LINUX_MEMORY_HOTPLUG_H 2#define __LINUX_MEMORY_HOTPLUG_H 3 4#include <linux/mmzone.h> 5#include <linux/spinlock.h> 6#include <linux/mmzone.h> 7#include <linux/notifier.h> 8 9struct page; 10struct zone; 11struct pglist_data; 12 13#ifdef CONFIG_MEMORY_HOTPLUG 14/* 15 * pgdat resizing functions 16 */ 17static inline 18void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) 19{ 20 spin_lock_irqsave(&pgdat->node_size_lock, *flags); 21} 22static inline 23void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) 24{ 25 spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); 26} 27static inline 28void pgdat_resize_init(struct pglist_data *pgdat) 29{ 30 spin_lock_init(&pgdat->node_size_lock); 31} 32/* 33 * Zone resizing functions 34 */ 35static inline unsigned zone_span_seqbegin(struct zone *zone) 36{ 37 return read_seqbegin(&zone->span_seqlock); 38} 39static inline int zone_span_seqretry(struct zone *zone, unsigned iv) 40{ 41 return read_seqretry(&zone->span_seqlock, iv); 42} 43static inline void zone_span_writelock(struct zone *zone) 44{ 45 write_seqlock(&zone->span_seqlock); 46} 47static inline void zone_span_writeunlock(struct zone *zone) 48{ 49 write_sequnlock(&zone->span_seqlock); 50} 51static inline void zone_seqlock_init(struct zone *zone) 52{ 53 seqlock_init(&zone->span_seqlock); 54} 55extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); 56extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); 57extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); 58/* need some defines for these for archs that don't support it */ 59extern void online_page(struct page *page); 60/* VM interface that may be used by firmware interface */ 61extern int add_memory(u64 start, u64 size); 62extern int remove_memory(u64 start, u64 size); 63extern int online_pages(unsigned long, unsigned long); 64 65/* reasonably generic interface to expand the physical pages in a zone */ 66extern int 
__add_pages(struct zone *zone, unsigned long start_pfn, 67 unsigned long nr_pages); 68#else /* ! CONFIG_MEMORY_HOTPLUG */ 69/* 70 * Stub functions for when hotplug is off 71 */ 72static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} 73static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} 74static inline void pgdat_resize_init(struct pglist_data *pgdat) {} 75 76static inline unsigned zone_span_seqbegin(struct zone *zone) 77{ 78 return 0; 79} 80static inline int zone_span_seqretry(struct zone *zone, unsigned iv) 81{ 82 return 0; 83} 84static inline void zone_span_writelock(struct zone *zone) {} 85static inline void zone_span_writeunlock(struct zone *zone) {} 86static inline void zone_seqlock_init(struct zone *zone) {} 87 88static inline int mhp_notimplemented(const char *func) 89{ 90 printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); 91 dump_stack(); 92 return -ENOSYS; 93} 94 95static inline int __add_pages(struct zone *zone, unsigned long start_pfn, 96 unsigned long nr_pages) 97{ 98 return mhp_notimplemented(__FUNCTION__); 99} 100#endif /* ! CONFIG_MEMORY_HOTPLUG */ 101static inline int __remove_pages(struct zone *zone, unsigned long start_pfn, 102 unsigned long nr_pages) 103{ 104 printk(KERN_WARNING "%s() called, not yet supported\n", __FUNCTION__); 105 dump_stack(); 106 return -ENOSYS; 107} 108#endif /* __LINUX_MEMORY_HOTPLUG_H */