Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: fix early panic with boot option "nosmp"
  x86/oprofile: fix Intel cpu family 6 detection
  oprofile: fix CPU unplug panic in ppro_stop()
  AMD IOMMU: fix possible race while accessing iommu->need_sync
  AMD IOMMU: set device table entry for aliased devices
  AMD IOMMU: struct amd_iommu remove padding on 64 bit
  x86: fix broken flushing in GART nofullflush path
  x86: fix dma_mapping_error for 32bit x86

7 files changed, 41 insertions(+), 39 deletions(-)
arch/x86/include/asm/amd_iommu_types.h (+12 -12)

···
 	/* Pointer to PCI device of this IOMMU */
 	struct pci_dev *dev;
 
-	/*
-	 * Capability pointer. There could be more than one IOMMU per PCI
-	 * device function if there are more than one AMD IOMMU capability
-	 * pointers.
-	 */
-	u16 cap_ptr;
-
 	/* physical address of MMIO space */
 	u64 mmio_phys;
 	/* virtual address of MMIO space */
···
 	/* capabilities of that IOMMU read from ACPI */
 	u32 cap;
+
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
 
 	/* pci domain of this IOMMU */
 	u16 pci_seg;
···
 	/* size of command buffer */
 	u32 cmd_buf_size;
 
-	/* event buffer virtual address */
-	u8 *evt_buf;
 	/* size of event buffer */
 	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
 	/* MSI number for event interrupt */
 	u16 evt_msi_num;
 
-	/* if one, we need to send a completion wait command */
-	int need_sync;
-
 	/* true if interrupts for this IOMMU are already enabled */
 	bool int_enabled;
+
+	/* if one, we need to send a completion wait command */
+	int need_sync;
 
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
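The reordering above is purely about layout: on 64 bit, pointers and u64 members need 8-byte alignment, so interleaving them with u16 fields makes the compiler insert padding. Grouping members of similar size removes that padding without changing any field. A stand-alone sketch of the effect (toy struct, field names borrowed only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Small and large members interleaved: padding before each 8-byte field. */
struct interleaved {
	uint16_t cap_ptr;	/* 2 bytes + 6 bytes of padding */
	uint64_t mmio_phys;	/* 8 bytes */
	uint16_t pci_seg;	/* 2 bytes + 6 bytes of padding */
	uint8_t *evt_buf;	/* 8 bytes */
};

/* Same members grouped by size: the two u16 fields share one 8-byte slot. */
struct grouped {
	uint64_t mmio_phys;
	uint8_t *evt_buf;
	uint16_t cap_ptr;
	uint16_t pci_seg;	/* only 4 bytes of tail padding remain */
};

int main(void)
{
	/* On a typical LP64 target this prints 32 and 24. */
	printf("interleaved: %zu\n", sizeof(struct interleaved));
	printf("grouped:     %zu\n", sizeof(struct grouped));
	return 0;
}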
arch/x86/include/asm/dma-mapping.h (+2 -4)

···
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-	return 0;
-#else
+#ifdef CONFIG_X86_64
 	struct dma_mapping_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
-	return (dma_addr == bad_dma_address);
 #endif
+	return (dma_addr == bad_dma_address);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
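With the #ifdef corrected, 32-bit builds now perform the bad_dma_address comparison instead of unconditionally returning 0, so drivers finally see mapping failures. A hypothetical caller fragment (not part of the patch; dev, buf and len are assumed) showing why that matters:

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle)) {
		/* Before the fix, a 32-bit kernel never reached this branch,
		 * so a failed mapping was silently used for DMA. */
		return -ENOMEM;
	}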
arch/x86/kernel/amd_iommu.c (+16 -20)

···
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
···
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
+
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
 
 	ret = __iommu_queue_command(iommu, &cmd);
 
···
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
···
 	cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 
 	ret = iommu_queue_command(iommu, &cmd);
-
-	iommu->need_sync = 1;
 
 	return ret;
 }
···
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
···
 				"device ", (*domain)->id);
 		print_devid(_bdf, 1);
 	}
+
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
 
 	return 1;
 }
···
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
···
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
···
 		goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
···
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
···
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
···
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
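The need_sync race fix works by confining the flag to iommu->lock: queueing a command sets it under the lock, and iommu_completion_wait() now tests and clears it under the same lock before deciding whether a completion-wait command is needed, which is why the call sites above can drop their unlocked if (unlikely(iommu->need_sync)) checks. A condensed sketch of the resulting pattern (error handling and the actual wait loop omitted):

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = 1;	/* flag is only touched under iommu->lock */
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0;
	unsigned long flags;
	struct iommu_cmd cmd;

	/* ... build the COMPL_WAIT command ... */

	spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->need_sync)		/* nothing queued since the last sync */
		goto out;

	iommu->need_sync = 0;

	ret = __iommu_queue_command(iommu, &cmd);
	/* ... wait for the completion-wait interrupt ... */
out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}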
arch/x86/kernel/mpparse.c (+3)

···
 	printk(KERN_INFO "Using ACPI for processor (LAPIC) "
 		"configuration information\n");
 
+	if (!mpf)
+		return;
+
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
 		mpf->mpf_specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
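This is the "nosmp" early-panic fix: on such boots no MP floating pointer structure gets recorded, mpf stays NULL, and the printk below would dereference it. Annotated reading of the added check (the comment is mine, not in the patch):

	/* No MP floating pointer structure was found (e.g. a "nosmp" boot):
	 * bail out before dereferencing mpf below. */
	if (!mpf)
		return;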
arch/x86/kernel/pci-gart_64.c (+2)

···
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	iommu_area_free(iommu_gart_bitmap, offset, size);
+	if (offset >= next_bit)
+		next_bit = offset + size;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
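For context (my reading of the surrounding allocator, which is not shown in the hunk): next_bit is where the next GART allocation search starts, and the IOTLB is only flushed once that search wraps around. The two added lines keep a freed range out of the allocator's reach until the next wrap, so it cannot be reused while a stale translation may still sit in the GART TLB. Annotated sketch of the resulting free path (comments are mine):

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	/*
	 * Push the allocation cursor past the freed range: it can only be
	 * handed out again after the allocator wraps, and the wrap is what
	 * triggers the GART TLB flush in the nofullflush path.
	 */
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}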
arch/x86/oprofile/nmi_int.c (+2 -3)

···
 		*cpu_type = "i386/pii";
 		break;
 	case 6 ... 8:
+	case 10 ... 11:
 		*cpu_type = "i386/piii";
 		break;
 	case 9:
+	case 13:
 		*cpu_type = "i386/p6_mobile";
-		break;
-	case 10 ... 13:
-		*cpu_type = "i386/p6";
 		break;
 	case 14:
 		*cpu_type = "i386/core";
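The old case 10 ... 13 lumped several distinct family-6 models under a generic "i386/p6" type; the fix folds them into the existing entries, so models 10 and 11 (later Pentium III parts) report "i386/piii" and model 13 (Pentium M Dothan, like the existing model-9 Banias case) reports "i386/p6_mobile". Resulting switch excerpt, condensed from the hunk above with my annotations:

	case 6 ... 8:		/* Pentium II / early Pentium III */
	case 10 ... 11:		/* later Pentium III models */
		*cpu_type = "i386/piii";
		break;
	case 9:			/* Pentium M (Banias) */
	case 13:		/* Pentium M (Dothan) */
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;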
arch/x86/oprofile/op_model_ppro.c (+4)

···
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (reset_value[i]) {
 			CTRL_READ(low, high, msrs, i);
···
 	unsigned int low, high;
 	int i;
 
+	if (!reset_value)
+		return;
 	for (i = 0; i < num_counters; ++i) {
 		if (!reset_value[i])
 			continue;
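The panic fixed here comes from CPU hotplug: ppro_start()/ppro_stop() can be called when reset_value has not been allocated (or was already torn down), and indexing it in the loop then dereferences NULL. The added guard simply bails out in that case (annotated reading, comment mine):

	/* reset_value may not be allocated at this point (e.g. around CPU
	 * hotplug while profiling); bail out instead of dereferencing NULL. */
	if (!reset_value)
		return;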