Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: fix early panic with boot option "nosmp"
  x86/oprofile: fix Intel cpu family 6 detection
  oprofile: fix CPU unplug panic in ppro_stop()
  AMD IOMMU: fix possible race while accessing iommu->need_sync
  AMD IOMMU: set device table entry for aliased devices
  AMD IOMMU: struct amd_iommu remove padding on 64 bit
  x86: fix broken flushing in GART nofullflush path
  x86: fix dma_mapping_error for 32bit x86

+41 -39
+12 -12
arch/x86/include/asm/amd_iommu_types.h
···
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
 
-	/*
-	 * Capability pointer. There could be more than one IOMMU per PCI
-	 * device function if there are more than one AMD IOMMU capability
-	 * pointers.
-	 */
-	u16 cap_ptr;
-
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
···
 
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
+
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
 
 	/* pci domain of this IOMMU */
 	u16 pci_seg;
···
 	/* size of command buffer */
 	u32 cmd_buf_size;
 
-	/* event buffer virtual address */
-	u8 *evt_buf;
 	/* size of event buffer */
 	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;
 
-	/* if one, we need to send a completion wait command */
-	int need_sync;
-
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;
+
+	/* if one, we need to send a completion wait command */
+	int need_sync;
 
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
+2 -4
arch/x86/include/asm/dma-mapping.h
···
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-	return 0;
-#else
+#ifdef CONFIG_X86_64
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
 #endif
+	return (dma_addr == bad_dma_address);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+16 -20
arch/x86/kernel/amd_iommu.c
···
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
···
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
+
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
 
 	ret = __iommu_queue_command(iommu, &cmd);
 
···
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
···
 		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 
 	ret = iommu_queue_command(iommu, &cmd);
-
-	iommu->need_sync = 1;
 
 	return ret;
 }
···
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
···
 		       "device ", (*domain)->id);
 		print_devid(_bdf, 1);
 	}
+
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
 
 	return 1;
 }
···
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
···
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
···
 			goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
···
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
···
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
···
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
+3
arch/x86/kernel/mpparse.c
···
 	printk(KERN_INFO "Using ACPI for processor (LAPIC) "
 		"configuration information\n");
 
+	if (!mpf)
+		return;
+
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
 		mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+2
arch/x86/kernel/pci-gart_64.c
···
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
+2 -3
arch/x86/oprofile/nmi_int.c
···
 		*cpu_type = "i386/pii";
 		break;
 	case 6 ... 8:
+	case 10 ... 11:
 		*cpu_type = "i386/piii";
 		break;
 	case 9:
+	case 13:
 		*cpu_type = "i386/p6_mobile";
-		break;
-	case 10 ... 13:
-		*cpu_type = "i386/p6";
 		break;
 	case 14:
 		*cpu_type = "i386/core";
+4
arch/x86/oprofile/op_model_ppro.c
···
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
···
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;