[PATCH] x86_64: add __meminit for memory hotplug

Add __meminit to the __init lineup to ensure functions default
to __init when memory hotplug is not enabled. Replace __devinit
with __meminit on functions that were changed when the memory
hotplug code was introduced.

Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Matt Tolentino; committed by Linus Torvalds (commit c09b4240, 44df75e6)

+20 -8
+1 -1
arch/i386/mm/init.c
··· 268 pkmap_page_table = pte; 269 } 270 271 - static void __devinit free_new_highpage(struct page *page) 272 { 273 set_page_count(page, 1); 274 __free_page(page);
··· 268 pkmap_page_table = pte; 269 } 270 271 + static void __meminit free_new_highpage(struct page *page) 272 { 273 set_page_count(page, 1); 274 __free_page(page);
+12
include/linux/init.h
··· 241 #define __cpuexitdata __exitdata 242 #endif 243 244 /* Functions marked as __devexit may be discarded at kernel link time, depending 245 on config options. Newer versions of binutils detect references from 246 retained sections to discarded sections and flag an error. Pointers to
··· 241 #define __cpuexitdata __exitdata 242 #endif 243 244 + #ifdef CONFIG_MEMORY_HOTPLUG 245 + #define __meminit 246 + #define __meminitdata 247 + #define __memexit 248 + #define __memexitdata 249 + #else 250 + #define __meminit __init 251 + #define __meminitdata __initdata 252 + #define __memexit __exit 253 + #define __memexitdata __exitdata 254 + #endif 255 + 256 /* Functions marked as __devexit may be discarded at kernel link time, depending 257 on config options. Newer versions of binutils detect references from 258 retained sections to discarded sections and flag an error. Pointers to
+7 -7
mm/page_alloc.c
··· 1735 * up by free_all_bootmem() once the early boot process is 1736 * done. Non-atomic initialization, single-pass. 1737 */ 1738 - void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1739 unsigned long start_pfn) 1740 { 1741 struct page *page; ··· 1788 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1789 #endif 1790 1791 - static int __devinit zone_batchsize(struct zone *zone) 1792 { 1793 int batch; 1794 ··· 1882 * Dynamically allocate memory for the 1883 * per cpu pageset array in struct zone. 1884 */ 1885 - static int __devinit process_zones(int cpu) 1886 { 1887 struct zone *zone, *dzone; 1888 ··· 1923 } 1924 } 1925 1926 - static int __devinit pageset_cpuup_callback(struct notifier_block *nfb, 1927 unsigned long action, 1928 void *hcpu) 1929 { ··· 1963 1964 #endif 1965 1966 - static __devinit 1967 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1968 { 1969 int i; ··· 1983 init_waitqueue_head(zone->wait_table + i); 1984 } 1985 1986 - static __devinit void zone_pcp_init(struct zone *zone) 1987 { 1988 int cpu; 1989 unsigned long batch = zone_batchsize(zone); ··· 2001 zone->name, zone->present_pages, batch); 2002 } 2003 2004 - static __devinit void init_currently_empty_zone(struct zone *zone, 2005 unsigned long zone_start_pfn, unsigned long size) 2006 { 2007 struct pglist_data *pgdat = zone->zone_pgdat;
··· 1735 * up by free_all_bootmem() once the early boot process is 1736 * done. Non-atomic initialization, single-pass. 1737 */ 1738 + void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1739 unsigned long start_pfn) 1740 { 1741 struct page *page; ··· 1788 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1789 #endif 1790 1791 + static int __meminit zone_batchsize(struct zone *zone) 1792 { 1793 int batch; 1794 ··· 1882 * Dynamically allocate memory for the 1883 * per cpu pageset array in struct zone. 1884 */ 1885 + static int __meminit process_zones(int cpu) 1886 { 1887 struct zone *zone, *dzone; 1888 ··· 1923 } 1924 } 1925 1926 + static int __meminit pageset_cpuup_callback(struct notifier_block *nfb, 1927 unsigned long action, 1928 void *hcpu) 1929 { ··· 1963 1964 #endif 1965 1966 + static __meminit 1967 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1968 { 1969 int i; ··· 1983 init_waitqueue_head(zone->wait_table + i); 1984 } 1985 1986 + static __meminit void zone_pcp_init(struct zone *zone) 1987 { 1988 int cpu; 1989 unsigned long batch = zone_batchsize(zone); ··· 2001 zone->name, zone->present_pages, batch); 2002 } 2003 2004 + static __meminit void init_currently_empty_zone(struct zone *zone, 2005 unsigned long zone_start_pfn, unsigned long size) 2006 { 2007 struct pglist_data *pgdat = zone->zone_pgdat;