[PATCH] x86_64: add __meminit for memory hotplug

Add __meminit to the __init lineup to ensure functions default
to __init when memory hotplug is not enabled. Replace __devinit
with __meminit on functions that were changed when the memory
hotplug code was introduced.

Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Matt Tolentino; committed by Linus Torvalds (commit c09b4240, parent 44df75e6)

+20 -8
+1 -1
arch/i386/mm/init.c
··· 268 268 pkmap_page_table = pte; 269 269 } 270 270 271 - static void __devinit free_new_highpage(struct page *page) 271 + static void __meminit free_new_highpage(struct page *page) 272 272 { 273 273 set_page_count(page, 1); 274 274 __free_page(page);
+12
include/linux/init.h
··· 241 241 #define __cpuexitdata __exitdata 242 242 #endif 243 243 244 + #ifdef CONFIG_MEMORY_HOTPLUG 245 + #define __meminit 246 + #define __meminitdata 247 + #define __memexit 248 + #define __memexitdata 249 + #else 250 + #define __meminit __init 251 + #define __meminitdata __initdata 252 + #define __memexit __exit 253 + #define __memexitdata __exitdata 254 + #endif 255 + 244 256 /* Functions marked as __devexit may be discarded at kernel link time, depending 245 257 on config options. Newer versions of binutils detect references from 246 258 retained sections to discarded sections and flag an error. Pointers to
+7 -7
mm/page_alloc.c
··· 1735 1735 * up by free_all_bootmem() once the early boot process is 1736 1736 * done. Non-atomic initialization, single-pass. 1737 1737 */ 1738 - void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1738 + void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1739 1739 unsigned long start_pfn) 1740 1740 { 1741 1741 struct page *page; ··· 1788 1788 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1789 1789 #endif 1790 1790 1791 - static int __devinit zone_batchsize(struct zone *zone) 1791 + static int __meminit zone_batchsize(struct zone *zone) 1792 1792 { 1793 1793 int batch; 1794 1794 ··· 1882 1882 * Dynamically allocate memory for the 1883 1883 * per cpu pageset array in struct zone. 1884 1884 */ 1885 - static int __devinit process_zones(int cpu) 1885 + static int __meminit process_zones(int cpu) 1886 1886 { 1887 1887 struct zone *zone, *dzone; 1888 1888 ··· 1923 1923 } 1924 1924 } 1925 1925 1926 - static int __devinit pageset_cpuup_callback(struct notifier_block *nfb, 1926 + static int __meminit pageset_cpuup_callback(struct notifier_block *nfb, 1927 1927 unsigned long action, 1928 1928 void *hcpu) 1929 1929 { ··· 1963 1963 1964 1964 #endif 1965 1965 1966 - static __devinit 1966 + static __meminit 1967 1967 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1968 1968 { 1969 1969 int i; ··· 1983 1983 init_waitqueue_head(zone->wait_table + i); 1984 1984 } 1985 1985 1986 - static __devinit void zone_pcp_init(struct zone *zone) 1986 + static __meminit void zone_pcp_init(struct zone *zone) 1987 1987 { 1988 1988 int cpu; 1989 1989 unsigned long batch = zone_batchsize(zone); ··· 2001 2001 zone->name, zone->present_pages, batch); 2002 2002 } 2003 2003 2004 - static __devinit void init_currently_empty_zone(struct zone *zone, 2004 + static __meminit void init_currently_empty_zone(struct zone *zone, 2005 2005 unsigned long zone_start_pfn, unsigned long size) 2006 2006 { 2007 2007 struct 
pglist_data *pgdat = zone->zone_pgdat;