Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
dmar, x86: Use function stubs when CONFIG_INTR_REMAP is disabled
x86-64: Fix and clean up AMD Fam10 MMCONF enabling
x86: UV: Address interrupt/IO port operation conflict
x86: Use online node real index in calculate_tlb_offset()
x86, asm: Fix binutils 2.15 build failure

+98 -46
+1 -1
arch/x86/include/asm/msr-index.h
··· 128 128 #define FAM10H_MMIO_CONF_ENABLE (1<<0) 129 129 #define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf 130 130 #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 131 - #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff 131 + #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL 132 132 #define FAM10H_MMIO_CONF_BASE_SHIFT 20 133 133 #define MSR_FAM10H_NODE_ID 0xc001100c 134 134
+4
arch/x86/include/asm/uv/uv_hub.h
··· 199 199 #define UVH_APICID 0x002D0E00L 200 200 #define UV_APIC_PNODE_SHIFT 6 201 201 202 + #define UV_APICID_HIBIT_MASK 0xffff0000 203 + 202 204 /* Local Bus from cpu's perspective */ 203 205 #define LOCAL_BUS_BASE 0x1c00000 204 206 #define LOCAL_BUS_SIZE (4 * 1024 * 1024) ··· 493 491 } 494 492 } 495 493 494 + extern unsigned int uv_apicid_hibits; 496 495 static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode) 497 496 { 497 + apicid |= uv_apicid_hibits; 498 498 return (1UL << UVH_IPI_INT_SEND_SHFT) | 499 499 ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | 500 500 (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+18 -1
arch/x86/include/asm/uv/uv_mmrs.h
··· 5 5 * 6 6 * SGI UV MMR definitions 7 7 * 8 - * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 + * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 9 9 */ 10 10 11 11 #ifndef _ASM_X86_UV_UV_MMRS_H ··· 751 751 unsigned long node_id : 14; /* RW */ 752 752 unsigned long rsvd_63 : 1; /* */ 753 753 } s; 754 + }; 755 + 756 + /* ========================================================================= */ 757 + /* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */ 758 + /* ========================================================================= */ 759 + #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL 760 + #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0 761 + 762 + #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0 763 + #define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL 764 + 765 + union uvh_lb_target_physical_apic_id_mask_u { 766 + unsigned long v; 767 + struct uvh_lb_target_physical_apic_id_mask_s { 768 + unsigned long bit_enables : 32; /* RW */ 769 + unsigned long rsvd_32_63 : 32; /* */ 770 + } s; 754 771 }; 755 772 756 773 /* ========================================================================= */
+23 -2
arch/x86/kernel/apic/x2apic_uv_x.c
··· 44 44 static union uvh_apicid uvh_apicid; 45 45 int uv_min_hub_revision_id; 46 46 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); 47 + unsigned int uv_apicid_hibits; 48 + EXPORT_SYMBOL_GPL(uv_apicid_hibits); 47 49 static DEFINE_SPINLOCK(uv_nmi_lock); 48 50 49 51 static inline bool is_GRU_range(u64 start, u64 end) ··· 87 85 uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT; 88 86 } 89 87 88 + /* 89 + * Add an extra bit as dictated by bios to the destination apicid of 90 + * interrupts potentially passing through the UV HUB. This prevents 91 + * a deadlock between interrupts and IO port operations. 92 + */ 93 + static void __init uv_set_apicid_hibit(void) 94 + { 95 + union uvh_lb_target_physical_apic_id_mask_u apicid_mask; 96 + unsigned long *mmr; 97 + 98 + mmr = early_ioremap(UV_LOCAL_MMR_BASE | 99 + UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK, sizeof(*mmr)); 100 + apicid_mask.v = *mmr; 101 + early_iounmap(mmr, sizeof(*mmr)); 102 + uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK; 103 + } 104 + 90 105 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 91 106 { 92 107 int nodeid; ··· 121 102 __get_cpu_var(x2apic_extra_bits) = 122 103 nodeid << (uvh_apicid.s.pnode_shift - 1); 123 104 uv_system_type = UV_NON_UNIQUE_APIC; 105 + uv_set_apicid_hibit(); 124 106 return 1; 125 107 } 126 108 } ··· 175 155 int pnode; 176 156 177 157 pnode = uv_apicid_to_pnode(phys_apicid); 158 + phys_apicid |= uv_apicid_hibits; 178 159 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 179 160 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 180 161 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | ··· 257 236 int cpu = cpumask_first(cpumask); 258 237 259 238 if ((unsigned)cpu < nr_cpu_ids) 260 - return per_cpu(x86_cpu_to_apicid, cpu); 239 + return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; 261 240 else 262 241 return BAD_APICID; 263 242 } ··· 276 255 if (cpumask_test_cpu(cpu, cpu_online_mask)) 277 256 break; 278 257 } 279 - return per_cpu(x86_cpu_to_apicid, cpu); 258 + 
return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; 280 259 } 281 260 282 261 static unsigned int x2apic_get_apic_id(unsigned long x)
+1 -1
arch/x86/kernel/entry_32.S
··· 395 395 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words 396 396 * pushed above; +8 corresponds to copy_thread's esp0 setting. 397 397 */ 398 - pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp) 398 + pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp) 399 399 CFI_REL_OFFSET eip, 0 400 400 401 401 pushl_cfi %eax
+30 -34
arch/x86/kernel/mmconf-fam10h_64.c
··· 25 25 }; 26 26 27 27 static u64 __cpuinitdata fam10h_pci_mmconf_base; 28 - static int __cpuinitdata fam10h_pci_mmconf_base_status; 29 28 30 29 static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { 31 30 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, ··· 43 44 return start1 - start2; 44 45 } 45 46 46 - /*[47:0] */ 47 - /* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */ 47 + #define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT) 48 + #define MMCONF_MASK (~(MMCONF_UNIT - 1)) 49 + #define MMCONF_SIZE (MMCONF_UNIT << 8) 50 + /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ 48 51 #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) 49 - #define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32))) 52 + #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) 50 53 static void __cpuinit get_fam10h_pci_mmconf_base(void) 51 54 { 52 55 int i; ··· 65 64 struct range range[8]; 66 65 67 66 /* only try to get setting from BSP */ 68 - /* -1 or 1 */ 69 - if (fam10h_pci_mmconf_base_status) 67 + if (fam10h_pci_mmconf_base) 70 68 return; 71 69 72 70 if (!early_pci_allowed()) 73 - goto fail; 71 + return; 74 72 75 73 found = 0; 76 74 for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { ··· 91 91 } 92 92 93 93 if (!found) 94 - goto fail; 94 + return; 95 95 96 96 /* SYS_CFG */ 97 97 address = MSR_K8_SYSCFG; ··· 99 99 100 100 /* TOP_MEM2 is not enabled? 
*/ 101 101 if (!(val & (1<<21))) { 102 - tom2 = 0; 102 + tom2 = 1ULL << 32; 103 103 } else { 104 104 /* TOP_MEM2 */ 105 105 address = MSR_K8_TOP_MEM2; 106 106 rdmsrl(address, val); 107 - tom2 = val & (0xffffULL<<32); 107 + tom2 = max(val & 0xffffff800000ULL, 1ULL << 32); 108 108 } 109 109 110 110 if (base <= tom2) 111 - base = tom2 + (1ULL<<32); 111 + base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK; 112 112 113 113 /* 114 114 * need to check if the range is in the high mmio range that is ··· 123 123 if (!(reg & 3)) 124 124 continue; 125 125 126 - start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/ 126 + start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/ 127 127 reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3)); 128 - end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/ 128 + end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/ 129 129 130 - if (!end) 130 + if (end < tom2) 131 131 continue; 132 132 133 133 range[hi_mmio_num].start = start; ··· 143 143 144 144 if (range[hi_mmio_num - 1].end < base) 145 145 goto out; 146 - if (range[0].start > base) 146 + if (range[0].start > base + MMCONF_SIZE) 147 147 goto out; 148 148 149 149 /* need to find one window */ 150 - base = range[0].start - (1ULL << 32); 150 + base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT; 151 151 if ((base > tom2) && BASE_VALID(base)) 152 152 goto out; 153 - base = range[hi_mmio_num - 1].end + (1ULL << 32); 154 - if ((base > tom2) && BASE_VALID(base)) 153 + base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK; 154 + if (BASE_VALID(base)) 155 155 goto out; 156 156 /* need to find window between ranges */ 157 - if (hi_mmio_num > 1) 158 - for (i = 0; i < hi_mmio_num - 1; i++) { 159 - if (range[i + 1].start > (range[i].end + (1ULL << 32))) { 160 - base = range[i].end + (1ULL << 32); 161 - if ((base > tom2) && BASE_VALID(base)) 162 - goto out; 163 - } 157 + for (i = 1; i < hi_mmio_num; i++) { 158 + base = (range[i - 1].end + MMCONF_UNIT) & 
MMCONF_MASK; 159 + val = range[i].start & MMCONF_MASK; 160 + if (val >= base + MMCONF_SIZE && BASE_VALID(base)) 161 + goto out; 164 162 } 165 - 166 - fail: 167 - fam10h_pci_mmconf_base_status = -1; 168 163 return; 164 + 169 165 out: 170 166 fam10h_pci_mmconf_base = base; 171 - fam10h_pci_mmconf_base_status = 1; 172 167 } 173 168 174 169 void __cpuinit fam10h_check_enable_mmcfg(void) ··· 185 190 186 191 /* only trust the one handle 256 buses, if acpi=off */ 187 192 if (!acpi_pci_disabled || busnbits >= 8) { 188 - u64 base; 189 - base = val & (0xffffULL << 32); 190 - if (fam10h_pci_mmconf_base_status <= 0) { 193 + u64 base = val & MMCONF_MASK; 194 + 195 + if (!fam10h_pci_mmconf_base) { 191 196 fam10h_pci_mmconf_base = base; 192 - fam10h_pci_mmconf_base_status = 1; 193 197 return; 194 198 } else if (fam10h_pci_mmconf_base == base) 195 199 return; ··· 200 206 * with 256 buses 201 207 */ 202 208 get_fam10h_pci_mmconf_base(); 203 - if (fam10h_pci_mmconf_base_status <= 0) 209 + if (!fam10h_pci_mmconf_base) { 210 + pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF; 204 211 return; 212 + } 205 213 206 214 printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n"); 207 215 val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
+3 -2
arch/x86/mm/tlb.c
··· 223 223 224 224 static void __cpuinit calculate_tlb_offset(void) 225 225 { 226 - int cpu, node, nr_node_vecs; 226 + int cpu, node, nr_node_vecs, idx = 0; 227 227 /* 228 228 * we are changing tlb_vector_offset for each CPU in runtime, but this 229 229 * will not cause inconsistency, as the write is atomic under X86. we ··· 239 239 nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; 240 240 241 241 for_each_online_node(node) { 242 - int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) * 242 + int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) * 243 243 nr_node_vecs; 244 244 int cpu_offset = 0; 245 245 for_each_cpu(cpu, cpumask_of_node(node)) { ··· 248 248 cpu_offset++; 249 249 cpu_offset = cpu_offset % nr_node_vecs; 250 250 } 251 + idx++; 251 252 } 252 253 } 253 254
+1 -1
arch/x86/platform/uv/tlb_uv.c
··· 1455 1455 * the below initialization can't be in firmware because the 1456 1456 * messaging IRQ will be determined by the OS 1457 1457 */ 1458 - apicid = uvhub_to_first_apicid(uvhub); 1458 + apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; 1459 1459 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 1460 1460 ((apicid << 32) | vector)); 1461 1461 }
+3 -1
arch/x86/platform/uv/uv_time.c
··· 89 89 90 90 apicid = cpu_physical_id(cpu); 91 91 pnode = uv_apicid_to_pnode(apicid); 92 + apicid |= uv_apicid_hibits; 92 93 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 93 94 (apicid << UVH_IPI_INT_APIC_ID_SHFT) | 94 95 (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); ··· 108 107 static int uv_setup_intr(int cpu, u64 expires) 109 108 { 110 109 u64 val; 110 + unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits; 111 111 int pnode = uv_cpu_to_pnode(cpu); 112 112 113 113 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, ··· 119 117 UVH_EVENT_OCCURRED0_RTC1_MASK); 120 118 121 119 val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | 122 - ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); 120 + ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); 123 121 124 122 /* Set configuration */ 125 123 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
+14 -3
include/linux/dmar.h
··· 175 175 return 0; 176 176 } 177 177 178 - #define enable_intr_remapping(mode) (-1) 179 - #define disable_intr_remapping() (0) 180 - #define reenable_intr_remapping(mode) (0) 181 178 #define intr_remapping_enabled (0) 179 + 180 + static inline int enable_intr_remapping(int eim) 181 + { 182 + return -1; 183 + } 184 + 185 + static inline void disable_intr_remapping(void) 186 + { 187 + } 188 + 189 + static inline int reenable_intr_remapping(int eim) 190 + { 191 + return 0; 192 + } 182 193 #endif 183 194 184 195 /* Can't use the common MSI interrupt functions