/* at v2.6.15-rc5 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* need some defines for these for archs that don't support it */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int add_memory(u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int online_pages(unsigned long, unsigned long);

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages)
{
	return mhp_notimplemented(__FUNCTION__);
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages)
{
	printk(KERN_WARNING "%s() called, not yet supported\n", __FUNCTION__);
	dump_stack();
	return -ENOSYS;
}
#endif /* __LINUX_MEMORY_HOTPLUG_H */
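
/*
 * Illustrative sketch (not part of the original header): one plausible way a
 * hot-add path and a reader could pair the locking helpers above.  The
 * zone/pgdat field names used here (zone_start_pfn, spanned_pages,
 * node_start_pfn, node_spanned_pages) come from <linux/mmzone.h> of this era;
 * the two functions below are hypothetical examples, not kernel API.
 */

/* Writer side: grow a node/zone span under the resize lock and span seqlock. */
static inline void example_grow_spans(struct pglist_data *pgdat,
				      struct zone *zone,
				      unsigned long start_pfn,
				      unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long end_pfn = start_pfn + nr_pages;

	pgdat_resize_lock(pgdat, &flags);	/* serialize node resizers */
	zone_span_writelock(zone);		/* readers retry via the seqlock */

	if (end_pfn > zone->zone_start_pfn + zone->spanned_pages)
		zone->spanned_pages = end_pfn - zone->zone_start_pfn;
	if (end_pfn > pgdat->node_start_pfn + pgdat->node_spanned_pages)
		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;

	zone_span_writeunlock(zone);
	pgdat_resize_unlock(pgdat, &flags);
}

/* Reader side: sample a zone's span consistently against a concurrent resize. */
static inline unsigned long example_zone_spanned(struct zone *zone)
{
	unsigned long spanned;
	unsigned seq;

	do {
		seq = zone_span_seqbegin(zone);
		spanned = zone->spanned_pages;
	} while (zone_span_seqretry(zone, seq));

	return spanned;
}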