Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6:
ACPI/PCI: another multiple _OSC memory leak fix
x86/PCI: X86_PAT & mprotect
PCI: enable nv_msi_ht_cap_quirk for ALi bridges
PCI: Make the intel-iommu_wait_op macro work when jiffies are not running
ACPI/PCI: handle multiple _OSC
x86/PCI: fix broken ISA DMA
PCI ACPI: fix uninitialized variable in __pci_osc_support_set

+89 -39
+5 -3
arch/x86/kernel/pci-dma.c
··· 385 if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) 386 return memory; 387 388 - if (!dev) 389 dev = &fallback_dev; 390 dma_mask = dev->coherent_dma_mask; 391 if (dma_mask == 0) 392 - dma_mask = DMA_32BIT_MASK; 393 394 /* Device not DMA able */ 395 if (dev->dma_mask == NULL) ··· 405 larger than 16MB and in this case we have a chance of 406 finding fitting memory in the next higher zone first. If 407 not retry with true GFP_DMA. -AK */ 408 - if (dma_mask <= DMA_32BIT_MASK) 409 gfp |= GFP_DMA32; 410 #endif 411
··· 385 if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) 386 return memory; 387 388 + if (!dev) { 389 dev = &fallback_dev; 390 + gfp |= GFP_DMA; 391 + } 392 dma_mask = dev->coherent_dma_mask; 393 if (dma_mask == 0) 394 + dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK; 395 396 /* Device not DMA able */ 397 if (dev->dma_mask == NULL) ··· 403 larger than 16MB and in this case we have a chance of 404 finding fitting memory in the next higher zone first. If 405 not retry with true GFP_DMA. -AK */ 406 + if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) 407 gfp |= GFP_DMA32; 408 #endif 409
+1 -3
arch/x86/pci/i386.c
··· 301 prot = pgprot_val(vma->vm_page_prot); 302 if (pat_wc_enabled && write_combine) 303 prot |= _PAGE_CACHE_WC; 304 - else if (pat_wc_enabled) 305 /* 306 * ioremap() and ioremap_nocache() defaults to UC MINUS for now. 307 * To avoid attribute conflicts, request UC MINUS here 308 * aswell. 309 */ 310 prot |= _PAGE_CACHE_UC_MINUS; 311 - else if (boot_cpu_data.x86 > 3) 312 - prot |= _PAGE_CACHE_UC; 313 314 vma->vm_page_prot = __pgprot(prot); 315
··· 301 prot = pgprot_val(vma->vm_page_prot); 302 if (pat_wc_enabled && write_combine) 303 prot |= _PAGE_CACHE_WC; 304 + else if (pat_wc_enabled || boot_cpu_data.x86 > 3) 305 /* 306 * ioremap() and ioremap_nocache() defaults to UC MINUS for now. 307 * To avoid attribute conflicts, request UC MINUS here 308 * aswell. 309 */ 310 prot |= _PAGE_CACHE_UC_MINUS; 311 312 vma->vm_page_prot = __pgprot(prot); 313
+3 -3
drivers/pci/intel-iommu.c
··· 49 50 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 51 52 - #define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */ 53 54 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 55 ··· 490 491 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ 492 {\ 493 - unsigned long start_time = jiffies;\ 494 while (1) {\ 495 sts = op (iommu->reg + offset);\ 496 if (cond)\ 497 break;\ 498 - if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))\ 499 panic("DMAR hardware is malfunctioning\n");\ 500 cpu_relax();\ 501 }\
··· 49 50 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 51 52 + #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ 53 54 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) 55 ··· 490 491 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ 492 {\ 493 + cycles_t start_time = get_cycles();\ 494 while (1) {\ 495 sts = op (iommu->reg + offset);\ 496 if (cond)\ 497 break;\ 498 + if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ 499 panic("DMAR hardware is malfunctioning\n");\ 500 cpu_relax();\ 501 }\
+79 -30
drivers/pci/pci-acpi.c
··· 19 #include <linux/pci-acpi.h> 20 #include "pci.h" 21 22 - static u32 ctrlset_buf[3] = {0, 0, 0}; 23 - static u32 global_ctrlsets = 0; 24 static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; 25 26 static acpi_status ··· 60 union acpi_object *out_obj; 61 u32 osc_dw0; 62 acpi_status *ret_status = (acpi_status *)retval; 63 64 - 65 /* Setting up input parameters */ 66 input.count = 4; 67 input.pointer = in_params; ··· 93 in_params[2].integer.value = 3; 94 in_params[3].type = ACPI_TYPE_BUFFER; 95 in_params[3].buffer.length = 12; 96 - in_params[3].buffer.pointer = (u8 *)context; 97 98 status = acpi_evaluate_object(handle, "_OSC", &input, &output); 99 - if (ACPI_FAILURE (status)) { 100 - *ret_status = status; 101 - return status; 102 - } 103 out_obj = output.pointer; 104 105 if (out_obj->type != ACPI_TYPE_BUFFER) { ··· 116 printk(KERN_DEBUG "_OSC invalid revision\n"); 117 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 118 /* Update Global Control Set */ 119 - global_ctrlsets = *((u32 *)(out_obj->buffer.pointer+8)); 120 status = AE_OK; 121 goto query_osc_out; 122 } ··· 126 } 127 128 /* Update Global Control Set */ 129 - global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); 130 status = AE_OK; 131 132 query_osc_out: 133 kfree(output.pointer); 134 *ret_status = status; 135 return status; 136 } 137 ··· 215 **/ 216 acpi_status __pci_osc_support_set(u32 flags, const char *hid) 217 { 218 - u32 temp; 219 - acpi_status retval; 220 221 if (!(flags & OSC_SUPPORT_MASKS)) { 222 return AE_TYPE; 223 } 224 - ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS); 225 - 226 - /* do _OSC query for all possible controls */ 227 - temp = ctrlset_buf[OSC_CONTROL_TYPE]; 228 - ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 229 - ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; 230 acpi_get_devices(hid, 231 acpi_query_osc, 232 - ctrlset_buf, 233 (void **) &retval ); 234 - ctrlset_buf[OSC_QUERY_TYPE] = 
!OSC_QUERY_ENABLE; 235 - ctrlset_buf[OSC_CONTROL_TYPE] = temp; 236 - if (ACPI_FAILURE(retval)) { 237 - /* no osc support at all */ 238 - ctrlset_buf[OSC_SUPPORT_TYPE] = 0; 239 - } 240 return AE_OK; 241 } 242 ··· 238 { 239 acpi_status status; 240 u32 ctrlset; 241 242 ctrlset = (flags & OSC_CONTROL_MASKS); 243 if (!ctrlset) { 244 return AE_TYPE; 245 } 246 - if (ctrlset_buf[OSC_SUPPORT_TYPE] && 247 - ((global_ctrlsets & ctrlset) != ctrlset)) { 248 return AE_SUPPORT; 249 } 250 - ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset; 251 - status = acpi_run_osc(handle, ctrlset_buf); 252 if (ACPI_FAILURE (status)) { 253 - ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset; 254 } 255 256 return status;
··· 19 #include <linux/pci-acpi.h> 20 #include "pci.h" 21 22 + struct acpi_osc_data { 23 + acpi_handle handle; 24 + u32 ctrlset_buf[3]; 25 + u32 global_ctrlsets; 26 + struct list_head sibiling; 27 + }; 28 + static LIST_HEAD(acpi_osc_data_list); 29 + 30 + static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) 31 + { 32 + struct acpi_osc_data *data; 33 + 34 + list_for_each_entry(data, &acpi_osc_data_list, sibiling) { 35 + if (data->handle == handle) 36 + return data; 37 + } 38 + data = kzalloc(sizeof(*data), GFP_KERNEL); 39 + if (!data) 40 + return NULL; 41 + INIT_LIST_HEAD(&data->sibiling); 42 + data->handle = handle; 43 + list_add_tail(&data->sibiling, &acpi_osc_data_list); 44 + return data; 45 + } 46 + 47 static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; 48 49 static acpi_status ··· 37 union acpi_object *out_obj; 38 u32 osc_dw0; 39 acpi_status *ret_status = (acpi_status *)retval; 40 + struct acpi_osc_data *osc_data; 41 + u32 flags = (unsigned long)context, temp; 42 + acpi_handle tmp; 43 44 + status = acpi_get_handle(handle, "_OSC", &tmp); 45 + if (ACPI_FAILURE(status)) 46 + return status; 47 + 48 + osc_data = acpi_get_osc_data(handle); 49 + if (!osc_data) { 50 + printk(KERN_ERR "acpi osc data array is full\n"); 51 + return AE_ERROR; 52 + } 53 + 54 + osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS); 55 + 56 + /* do _OSC query for all possible controls */ 57 + temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE]; 58 + osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 59 + osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; 60 + 61 /* Setting up input parameters */ 62 input.count = 4; 63 input.pointer = in_params; ··· 51 in_params[2].integer.value = 3; 52 in_params[3].type = ACPI_TYPE_BUFFER; 53 in_params[3].buffer.length = 12; 54 + in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf; 55 56 status = acpi_evaluate_object(handle, "_OSC", &input, 
&output); 57 + if (ACPI_FAILURE(status)) 58 + goto out_nofree; 59 out_obj = output.pointer; 60 61 if (out_obj->type != ACPI_TYPE_BUFFER) { ··· 76 printk(KERN_DEBUG "_OSC invalid revision\n"); 77 if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { 78 /* Update Global Control Set */ 79 + osc_data->global_ctrlsets = 80 + *((u32 *)(out_obj->buffer.pointer + 8)); 81 status = AE_OK; 82 goto query_osc_out; 83 } ··· 85 } 86 87 /* Update Global Control Set */ 88 + osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); 89 status = AE_OK; 90 91 query_osc_out: 92 kfree(output.pointer); 93 + out_nofree: 94 *ret_status = status; 95 + 96 + osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE; 97 + osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp; 98 + if (ACPI_FAILURE(status)) { 99 + /* no osc support at all */ 100 + osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0; 101 + } 102 + 103 return status; 104 } 105 ··· 165 **/ 166 acpi_status __pci_osc_support_set(u32 flags, const char *hid) 167 { 168 + acpi_status retval = AE_NOT_FOUND; 169 170 if (!(flags & OSC_SUPPORT_MASKS)) { 171 return AE_TYPE; 172 } 173 acpi_get_devices(hid, 174 acpi_query_osc, 175 + (void *)(unsigned long)flags, 176 (void **) &retval ); 177 return AE_OK; 178 } 179 ··· 201 { 202 acpi_status status; 203 u32 ctrlset; 204 + acpi_handle tmp; 205 + struct acpi_osc_data *osc_data; 206 + 207 + status = acpi_get_handle(handle, "_OSC", &tmp); 208 + if (ACPI_FAILURE(status)) 209 + return status; 210 + 211 + osc_data = acpi_get_osc_data(handle); 212 + if (!osc_data) { 213 + printk(KERN_ERR "acpi osc data array is full\n"); 214 + return AE_ERROR; 215 + } 216 217 ctrlset = (flags & OSC_CONTROL_MASKS); 218 if (!ctrlset) { 219 return AE_TYPE; 220 } 221 + if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] && 222 + ((osc_data->global_ctrlsets & ctrlset) != ctrlset)) { 223 return AE_SUPPORT; 224 } 225 + osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset; 226 + status = acpi_run_osc(handle, osc_data->ctrlset_buf); 227 if 
(ACPI_FAILURE (status)) { 228 + osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset; 229 } 230 231 return status;
+1
drivers/pci/quirks.c
··· 1826 } 1827 } 1828 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk); 1829 1830 static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 1831 {
··· 1826 } 1827 } 1828 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk); 1829 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk); 1830 1831 static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 1832 {