[IA64] Fix large MCA bootmem allocation

The MCA code allocates bootmem memory for all NR_CPUS possible cpus, regardless
of how many cpus the system actually has. This change allocates
memory only for the cpus that actually exist.

On my test system with NR_CPUS = 1024, reserved memory was reduced by 130944k.

Before: Memory: 27886976k/28111168k available (8282k code, 242304k reserved, 5928k data, 1792k init)
After: Memory: 28017920k/28111168k available (8282k code, 111360k reserved, 5928k data, 1792k init)
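The reduction follows directly from the allocation strategy: before the patch, space for every possible cpu (NR_CPUS of them) was reserved from bootmem at boot time; afterwards, only cpus that are actually brought online get an allocation. Below is a small userspace sketch of that arithmetic; it is not kernel code, and the structure size and cpu counts in it are illustrative assumptions, not values taken from the ia64 headers.

/*
 * Userspace sketch contrasting the old (eager, NR_CPUS-sized) and new
 * (per-online-cpu) MCA allocation strategies.  All sizes here are
 * assumptions chosen for illustration only.
 */
#include <stdio.h>

#define NR_CPUS       1024           /* build-time maximum, as on the test system */
#define ONLINE_CPUS   64             /* assumed number of cpus actually present   */
#define MCA_CPU_SIZE  (128 * 1024)   /* stand-in for sizeof(struct ia64_mca_cpu)  */

int main(void)
{
	/* Old scheme: one boot-time reservation sized for every possible cpu. */
	size_t old_bytes = (size_t)NR_CPUS * MCA_CPU_SIZE;

	/*
	 * New scheme: the boot cpu gets a bootmem chunk; every other cpu is
	 * given its own buffer only when it is brought online.
	 */
	size_t new_bytes = (size_t)ONLINE_CPUS * MCA_CPU_SIZE;

	printf("old: %zu KiB reserved up front\n", old_bytes / 1024);
	printf("new: %zu KiB allocated for online cpus\n", new_bytes / 1024);
	printf("saved: %zu KiB\n", (old_bytes - new_bytes) / 1024);
	return 0;
}

With these assumed values the eager scheme would reserve 131072 KiB where only 8192 KiB is actually needed, the same effect (at a different scale) as the drop in reserved memory shown above.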

Signed-off-by: Russ Anderson <rja@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>


 arch/ia64/kernel/mca.c | 26 insertions(+), 29 deletions(-)

diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -17,7 +17,7 @@
  * Copyright (C) 2000 Intel
  * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
  *
- * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
  * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
  *
  * Copyright (C) 2006 FUJITSU LIMITED
@@ -1762,11 +1762,8 @@
 /* Caller prevents this from being called after init */
 static void * __init_refok mca_bootmem(void)
 {
-	void *p;
-
-	p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
-			  KERNEL_STACK_SIZE);
-	return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
+			       KERNEL_STACK_SIZE, 0);
 }
 
 /* Do per-CPU MCA-related initialization. */
@@ -1774,33 +1771,33 @@
 ia64_mca_cpu_init(void *cpu_data)
 {
 	void *pal_vaddr;
+	void *data;
+	long sz = sizeof(struct ia64_mca_cpu);
+	int cpu = smp_processor_id();
 	static int first_time = 1;
 
-	if (first_time) {
-		void *mca_data;
-		int cpu;
-
-		first_time = 0;
-		mca_data = mca_bootmem();
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			format_mca_init_stack(mca_data,
-				offsetof(struct ia64_mca_cpu, mca_stack),
-				"MCA", cpu);
-			format_mca_init_stack(mca_data,
-				offsetof(struct ia64_mca_cpu, init_stack),
-				"INIT", cpu);
-			__per_cpu_mca[cpu] = __pa(mca_data);
-			mca_data += sizeof(struct ia64_mca_cpu);
-		}
-	}
-
 	/*
-	 * The MCA info structure was allocated earlier and its
-	 * physical address saved in __per_cpu_mca[cpu]. Copy that
-	 * address * to ia64_mca_data so we can access it as a per-CPU
-	 * variable.
+	 * Structure will already be allocated if cpu has been online,
+	 * then offlined.
 	 */
-	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
+	if (__per_cpu_mca[cpu]) {
+		data = __va(__per_cpu_mca[cpu]);
+	} else {
+		if (first_time) {
+			data = mca_bootmem();
+			first_time = 0;
+		} else
+			data = page_address(alloc_pages_node(numa_node_id(),
+					GFP_KERNEL, get_order(sz)));
+		if (!data)
+			panic("Could not allocate MCA memory for cpu %d\n",
+					cpu);
+	}
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
+		"MCA", cpu);
+	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
+		"INIT", cpu);
+	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
 
 	/*
 	 * Stash away a copy of the PTE needed to map the per-CPU page.