Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (27 commits)
x86: allocate space within a region top-down
x86: update iomem_resource end based on CPU physical address capabilities
x86/PCI: allocate space from the end of a region, not the beginning
PCI: allocate bus resources from the top down
resources: support allocating space within a region from the top down
resources: handle overflow when aligning start of available area
resources: ensure callback doesn't allocate outside available space
resources: factor out resource_clip() to simplify find_resource()
resources: add a default alignf to simplify find_resource()
x86/PCI: MMCONFIG: fix region end calculation
PCI: Add support for polling PME state on suspended legacy PCI devices
PCI: Export some PCI PM functionality
PCI: fix message typo
PCI: log vendor/device ID always
PCI: update Intel chipset names and defines
PCI: use new ccflags variable in Makefile
PCI: add PCI_MSIX_TABLE/PBA defines
PCI: add PCI vendor id for STmicroelectronics
x86/PCI: irq and pci_ids patch for Intel Patsburg DeviceIDs
PCI: OLPC: Only enable PCI configuration type override on XO-1
...

+395 -70
+5
Documentation/kernel-parameters.txt
··· 2175 2175 reset_devices [KNL] Force drivers to reset the underlying device 2176 2176 during initialization. 2177 2177 2178 + resource_alloc_from_bottom 2179 + Allocate new resources from the beginning of available 2180 + space, not the end. If you need to use this, please 2181 + report a bug. 2182 + 2178 2183 resume= [SWSUSP] 2179 2184 Specify the partition device for software suspend 2180 2185
+2
arch/x86/kernel/setup.c
··· 769 769 770 770 x86_init.oem.arch_setup(); 771 771 772 + resource_alloc_from_bottom = 0; 773 + iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; 772 774 setup_memory_map(); 773 775 parse_setup_data(); 774 776 /* update the e820_saved too */
+11 -6
arch/x86/pci/i386.c
··· 65 65 resource_size_t size, resource_size_t align) 66 66 { 67 67 struct pci_dev *dev = data; 68 - resource_size_t start = res->start; 68 + resource_size_t start = round_down(res->end - size + 1, align); 69 69 70 70 if (res->flags & IORESOURCE_IO) { 71 - if (skip_isa_ioresource_align(dev)) 72 - return start; 73 - if (start & 0x300) 74 - start = (start + 0x3ff) & ~0x3ff; 71 + 72 + /* 73 + * If we're avoiding ISA aliases, the largest contiguous I/O 74 + * port space is 256 bytes. Clearing bits 9 and 10 preserves 75 + * all 256-byte and smaller alignments, so the result will 76 + * still be correctly aligned. 77 + */ 78 + if (!skip_isa_ioresource_align(dev)) 79 + start &= ~0x300; 75 80 } else if (res->flags & IORESOURCE_MEM) { 76 81 if (start < BIOS_END) 77 - start = BIOS_END; 82 + start = res->end; /* fail; no space */ 78 83 } 79 84 return start; 80 85 }
+6 -5
arch/x86/pci/irq.c
··· 584 584 case PCI_DEVICE_ID_INTEL_ICH9_3: 585 585 case PCI_DEVICE_ID_INTEL_ICH9_4: 586 586 case PCI_DEVICE_ID_INTEL_ICH9_5: 587 - case PCI_DEVICE_ID_INTEL_TOLAPAI_0: 587 + case PCI_DEVICE_ID_INTEL_EP80579_0: 588 588 case PCI_DEVICE_ID_INTEL_ICH10_0: 589 589 case PCI_DEVICE_ID_INTEL_ICH10_1: 590 590 case PCI_DEVICE_ID_INTEL_ICH10_2: 591 591 case PCI_DEVICE_ID_INTEL_ICH10_3: 592 + case PCI_DEVICE_ID_INTEL_PATSBURG_LPC: 592 593 r->name = "PIIX/ICH"; 593 594 r->get = pirq_piix_get; 594 595 r->set = pirq_piix_set; 595 596 return 1; 596 597 } 597 598 598 - if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) && 599 - (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) { 599 + if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) && 600 + (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) { 600 601 r->name = "PIIX/ICH"; 601 602 r->get = pirq_piix_get; 602 603 r->set = pirq_piix_set; 603 604 return 1; 604 605 } 605 606 606 - if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) && 607 - (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) { 607 + if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) && 608 + (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) { 608 609 r->name = "PIIX/ICH"; 609 610 r->get = pirq_piix_get; 610 611 r->set = pirq_piix_set;
+1 -3
arch/x86/pci/mmconfig-shared.c
··· 65 65 int end, u64 addr) 66 66 { 67 67 struct pci_mmcfg_region *new; 68 - int num_buses; 69 68 struct resource *res; 70 69 71 70 if (addr == 0) ··· 81 82 82 83 list_add_sorted(new); 83 84 84 - num_buses = end - start + 1; 85 85 res = &new->res; 86 86 res->start = addr + PCI_MMCFG_BUS_OFFSET(start); 87 - res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1; 87 + res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1; 88 88 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 89 89 snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, 90 90 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
+2 -2
drivers/i2c/busses/Kconfig
··· 95 95 ESB2 96 96 ICH8 97 97 ICH9 98 - Tolapai 98 + EP80579 (Tolapai) 99 99 ICH10 100 - 3400/5 Series (PCH) 100 + 5/3400 Series (PCH) 101 101 Cougar Point (PCH) 102 102 103 103 This driver can also be built as a module. If so, the module
+5 -5
drivers/i2c/busses/i2c-i801.c
··· 38 38 82801G (ICH7) 0x27da 32 hard yes yes yes 39 39 82801H (ICH8) 0x283e 32 hard yes yes yes 40 40 82801I (ICH9) 0x2930 32 hard yes yes yes 41 - Tolapai 0x5032 32 hard yes yes yes 41 + EP80579 (Tolapai) 0x5032 32 hard yes yes yes 42 42 ICH10 0x3a30 32 hard yes yes yes 43 43 ICH10 0x3a60 32 hard yes yes yes 44 - 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes 44 + 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes 45 45 Cougar Point (PCH) 0x1c22 32 hard yes yes yes 46 46 47 47 Features supported by this driver: ··· 587 587 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) }, 588 588 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) }, 589 589 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) }, 590 - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) }, 590 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) }, 591 591 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, 592 592 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, 593 - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, 594 - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) }, 593 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) }, 594 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) }, 595 595 { 0, } 596 596 }; 597 597
+1 -3
drivers/pci/Makefile
··· 65 65 66 66 obj-$(CONFIG_PCI_STUB) += pci-stub.o 67 67 68 - ifeq ($(CONFIG_PCI_DEBUG),y) 69 - EXTRA_CFLAGS += -DDEBUG 70 - endif 68 + ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+48 -5
drivers/pci/bus.c
··· 64 64 } 65 65 } 66 66 67 + /* 68 + * Find the highest-address bus resource below the cursor "res". If the 69 + * cursor is NULL, return the highest resource. 70 + */ 71 + static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, 72 + unsigned int type, 73 + struct resource *res) 74 + { 75 + struct resource *r, *prev = NULL; 76 + int i; 77 + 78 + pci_bus_for_each_resource(bus, r, i) { 79 + if (!r) 80 + continue; 81 + 82 + if ((r->flags & IORESOURCE_TYPE_BITS) != type) 83 + continue; 84 + 85 + /* If this resource is at or past the cursor, skip it */ 86 + if (res) { 87 + if (r == res) 88 + continue; 89 + if (r->end > res->end) 90 + continue; 91 + if (r->end == res->end && r->start > res->start) 92 + continue; 93 + } 94 + 95 + if (!prev) 96 + prev = r; 97 + 98 + /* 99 + * A small resource is higher than a large one that ends at 100 + * the same address. 101 + */ 102 + if (r->end > prev->end || 103 + (r->end == prev->end && r->start > prev->start)) 104 + prev = r; 105 + } 106 + 107 + return prev; 108 + } 109 + 67 110 /** 68 111 * pci_bus_alloc_resource - allocate a resource from a parent bus 69 112 * @bus: PCI bus ··· 132 89 resource_size_t), 133 90 void *alignf_data) 134 91 { 135 - int i, ret = -ENOMEM; 92 + int ret = -ENOMEM; 136 93 struct resource *r; 137 94 resource_size_t max = -1; 95 + unsigned int type = res->flags & IORESOURCE_TYPE_BITS; 138 96 139 97 type_mask |= IORESOURCE_IO | IORESOURCE_MEM; 140 98 ··· 143 99 if (!(res->flags & IORESOURCE_MEM_64)) 144 100 max = PCIBIOS_MAX_MEM_32; 145 101 146 - pci_bus_for_each_resource(bus, r, i) { 147 - if (!r) 148 - continue; 149 - 102 + /* Look for space at highest addresses first */ 103 + r = pci_bus_find_resource_prev(bus, type, NULL); 104 + for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) { 150 105 /* type_mask must match */ 151 106 if ((res->flags ^ r->flags) & type_mask) 152 107 continue;
+2 -2
drivers/pci/hotplug/ibmphp_hpc.c
··· 133 133 debug ("%s - Entry\n", __func__); 134 134 135 135 mutex_init(&sem_hpcaccess); 136 - init_MUTEX (&semOperations); 137 - init_MUTEX_LOCKED (&sem_exit); 136 + sema_init(&semOperations, 1); 137 + sema_init(&sem_exit, 0); 138 138 to_debug = 0; 139 139 140 140 debug ("%s - Exit\n", __func__);
+2 -2
drivers/pci/msi.h
··· 22 22 #define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) 23 23 #define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) 24 24 25 - #define msix_table_offset_reg(base) (base + 0x04) 26 - #define msix_pba_offset_reg(base) (base + 0x08) 25 + #define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE) 26 + #define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA) 27 27 #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) 28 28 #define multi_msix_capable(control) msix_table_size((control)) 29 29
+78 -1
drivers/pci/pci.c
··· 38 38 39 39 unsigned int pci_pm_d3_delay; 40 40 41 + static void pci_pme_list_scan(struct work_struct *work); 42 + 43 + static LIST_HEAD(pci_pme_list); 44 + static DEFINE_MUTEX(pci_pme_list_mutex); 45 + static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); 46 + 47 + struct pci_pme_device { 48 + struct list_head list; 49 + struct pci_dev *dev; 50 + }; 51 + 52 + #define PME_TIMEOUT 1000 /* How long between PME checks */ 53 + 41 54 static void pci_dev_d3_sleep(struct pci_dev *dev) 42 55 { 43 56 unsigned int delay = dev->d3_delay; ··· 1344 1331 return !!(dev->pme_support & (1 << state)); 1345 1332 } 1346 1333 1334 + static void pci_pme_list_scan(struct work_struct *work) 1335 + { 1336 + struct pci_pme_device *pme_dev; 1337 + 1338 + mutex_lock(&pci_pme_list_mutex); 1339 + if (!list_empty(&pci_pme_list)) { 1340 + list_for_each_entry(pme_dev, &pci_pme_list, list) 1341 + pci_pme_wakeup(pme_dev->dev, NULL); 1342 + schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); 1343 + } 1344 + mutex_unlock(&pci_pme_list_mutex); 1345 + } 1346 + 1347 + /** 1348 + * pci_external_pme - is a device an external PCI PME source? 1349 + * @dev: PCI device to check 1350 + * 1351 + */ 1352 + 1353 + static bool pci_external_pme(struct pci_dev *dev) 1354 + { 1355 + if (pci_is_pcie(dev) || dev->bus->number == 0) 1356 + return false; 1357 + return true; 1358 + } 1359 + 1347 1360 /** 1348 1361 * pci_pme_active - enable or disable PCI device's PME# function 1349 1362 * @dev: PCI device to handle. ··· 1393 1354 1394 1355 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 1395 1356 1357 + /* PCI (as opposed to PCIe) PME requires that the device have 1358 + its PME# line hooked up correctly. Not all hardware vendors 1359 + do this, so the PME never gets delivered and the device 1360 + remains asleep. The easiest way around this is to 1361 + periodically walk the list of suspended devices and check 1362 + whether any have their PME flag set. The assumption is that 1363 + we'll wake up often enough anyway that this won't be a huge 1364 + hit, and the power savings from the devices will still be a 1365 + win. */ 1366 + 1367 + if (pci_external_pme(dev)) { 1368 + struct pci_pme_device *pme_dev; 1369 + if (enable) { 1370 + pme_dev = kmalloc(sizeof(struct pci_pme_device), 1371 + GFP_KERNEL); 1372 + if (!pme_dev) 1373 + goto out; 1374 + pme_dev->dev = dev; 1375 + mutex_lock(&pci_pme_list_mutex); 1376 + list_add(&pme_dev->list, &pci_pme_list); 1377 + if (list_is_singular(&pci_pme_list)) 1378 + schedule_delayed_work(&pci_pme_work, 1379 + msecs_to_jiffies(PME_TIMEOUT)); 1380 + mutex_unlock(&pci_pme_list_mutex); 1381 + } else { 1382 + mutex_lock(&pci_pme_list_mutex); 1383 + list_for_each_entry(pme_dev, &pci_pme_list, list) { 1384 + if (pme_dev->dev == dev) { 1385 + list_del(&pme_dev->list); 1386 + kfree(pme_dev); 1387 + break; 1388 + } 1389 + } 1390 + mutex_unlock(&pci_pme_list_mutex); 1391 + } 1392 + } 1393 + 1394 + out: 1396 1395 dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", 1397 1396 enable ? "enabled" : "disabled"); 1398 1397 } ··· 2766 2689 2767 2690 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 2768 2691 if (!ret) 2769 - ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 2692 + ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 2770 2693 2771 2694 return ret; 2772 2695 }
-3
drivers/pci/pci.h
··· 63 63 extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); 64 64 extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); 65 65 extern void pci_disable_enabled_device(struct pci_dev *dev); 66 - extern bool pci_check_pme_status(struct pci_dev *dev); 67 66 extern int pci_finish_runtime_suspend(struct pci_dev *dev); 68 - extern void pci_wakeup_event(struct pci_dev *dev); 69 67 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 70 - extern void pci_pme_wakeup_bus(struct pci_bus *bus); 71 68 extern void pci_pm_init(struct pci_dev *dev); 72 69 extern void platform_pci_wakeup_init(struct pci_dev *dev); 73 70 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+1 -1
drivers/pci/pcie/aer/aerdrv.c
··· 416 416 */ 417 417 static int __init aer_service_init(void) 418 418 { 419 - if (!pci_aer_available()) 419 + if (!pci_aer_available() || aer_acpi_firmware_first()) 420 420 return -ENXIO; 421 421 return pcie_port_service_register(&aerdriver); 422 422 }
+3
drivers/pci/pcie/aer/aerdrv.h
··· 132 132 133 133 #ifdef CONFIG_ACPI_APEI 134 134 extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); 135 + extern bool aer_acpi_firmware_first(void); 135 136 #else 136 137 static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) 137 138 { ··· 140 139 return pci_dev->__aer_firmware_first; 141 140 return 0; 142 141 } 142 + 143 + static inline bool aer_acpi_firmware_first(void) { return false; } 143 144 #endif 144 145 145 146 static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
+34
drivers/pci/pcie/aer/aerdrv_acpi.c
··· 93 93 aer_set_firmware_first(dev); 94 94 return dev->__aer_firmware_first; 95 95 } 96 + 97 + static bool aer_firmware_first; 98 + 99 + static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data) 100 + { 101 + struct acpi_hest_aer_common *p; 102 + 103 + if (aer_firmware_first) 104 + return 0; 105 + 106 + switch (hest_hdr->type) { 107 + case ACPI_HEST_TYPE_AER_ROOT_PORT: 108 + case ACPI_HEST_TYPE_AER_ENDPOINT: 109 + case ACPI_HEST_TYPE_AER_BRIDGE: 110 + p = (struct acpi_hest_aer_common *)(hest_hdr + 1); 111 + aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); 112 + default: 113 + return 0; 114 + } 115 + } 116 + 117 + /** 118 + * aer_acpi_firmware_first - Check if APEI should control AER. 119 + */ 120 + bool aer_acpi_firmware_first(void) 121 + { 122 + static bool parsed = false; 123 + 124 + if (!parsed) { 125 + apei_hest_parse(aer_hest_parse_aff, NULL); 126 + parsed = true; 127 + } 128 + return aer_firmware_first; 129 + } 96 130 #endif
+1 -1
drivers/pci/pcie/aer/aerdrv_core.c
··· 754 754 { 755 755 struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); 756 756 struct pcie_device *p_device = rpc->rpd; 757 - struct aer_err_source e_src; 757 + struct aer_err_source uninitialized_var(e_src); 758 758 759 759 mutex_lock(&rpc->rpc_mutex); 760 760 while (get_e_source(rpc, &e_src))
+1 -1
drivers/pci/pcie/portdrv_acpi.c
··· 49 49 | OSC_PCI_EXPRESS_PME_CONTROL; 50 50 51 51 if (pci_aer_available()) { 52 - if (pcie_aer_get_firmware_first(port)) 52 + if (aer_acpi_firmware_first()) 53 53 dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n"); 54 54 else 55 55 flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+2 -2
drivers/pci/probe.c
··· 961 961 dev->class = class; 962 962 class >>= 8; 963 963 964 - dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", 965 - dev->vendor, dev->device, class, dev->hdr_type); 964 + dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n", 965 + dev->vendor, dev->device, dev->hdr_type, class); 966 966 967 967 /* need to have dev->class ready */ 968 968 dev->cfg_size = pci_cfg_space_size(dev);
+1
drivers/pci/proc.c
··· 303 303 .read = proc_bus_pci_read, 304 304 .write = proc_bus_pci_write, 305 305 .unlocked_ioctl = proc_bus_pci_ioctl, 306 + .compat_ioctl = proc_bus_pci_ioctl, 306 307 #ifdef HAVE_PCI_MMAP 307 308 .open = proc_bus_pci_open, 308 309 .release = proc_bus_pci_release,
+31
drivers/pci/quirks.c
··· 2297 2297 PCI_DEVICE_ID_NVIDIA_NVENET_15, 2298 2298 nvenet_msi_disable); 2299 2299 2300 + /* 2301 + * Some versions of the MCP55 bridge from nvidia have a legacy irq routing 2302 + * config register. This register controls the routing of legacy interrupts 2303 + * from devices that route through the MCP55. If this register is misprogrammed 2304 + * interrupts are only sent to the bsp, unlike conventional systems where the 2305 + * irq is broadcast to all online cpus. Not having this register set 2306 + * properly prevents kdump from booting up properly, so let's make sure that 2307 + * we have it set correctly. 2308 + * Note this is an undocumented register. 2309 + */ 2310 + static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev) 2311 + { 2312 + u32 cfg; 2313 + 2314 + pci_read_config_dword(dev, 0x74, &cfg); 2315 + 2316 + if (cfg & ((1 << 2) | (1 << 15))) { 2317 + printk(KERN_INFO "Rewriting irq routing register on MCP55\n"); 2318 + cfg &= ~((1 << 2) | (1 << 15)); 2319 + pci_write_config_dword(dev, 0x74, cfg); 2320 + } 2321 + } 2322 + 2323 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 2324 + PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0, 2325 + nvbridge_check_legacy_irq_routing); 2326 + 2327 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 2328 + PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4, 2329 + nvbridge_check_legacy_irq_routing); 2330 + 2300 2331 static int __devinit ht_check_msi_mapping(struct pci_dev *dev) 2301 2332 { 2302 2333 int pos, ttl = 48;
+1 -1
drivers/pci/setup-res.c
··· 85 85 } 86 86 } 87 87 res->flags &= ~IORESOURCE_UNSET; 88 - dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n", 88 + dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n", 89 89 resno, res, (unsigned long long)region.start, 90 90 (unsigned long long)region.end); 91 91 }
+1
include/linux/ioport.h
··· 112 112 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ 113 113 extern struct resource ioport_resource; 114 114 extern struct resource iomem_resource; 115 + extern int resource_alloc_from_bottom; 115 116 116 117 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); 117 118 extern int request_resource(struct resource *root, struct resource *new);
+4 -1
include/linux/pci.h
··· 541 541 struct module; 542 542 struct pci_driver { 543 543 struct list_head node; 544 - char *name; 544 + const char *name; 545 545 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ 546 546 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ 547 547 void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ ··· 819 819 int pci_prepare_to_sleep(struct pci_dev *dev); 820 820 int pci_back_from_sleep(struct pci_dev *dev); 821 821 bool pci_dev_run_wake(struct pci_dev *dev); 822 + bool pci_check_pme_status(struct pci_dev *dev); 823 + void pci_wakeup_event(struct pci_dev *dev); 824 + void pci_pme_wakeup_bus(struct pci_bus *bus); 822 825 823 826 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 824 827 bool enable)
+13 -8
include/linux/pci_ids.h
··· 767 767 #define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000 768 768 #define PCI_DEVICE_ID_ELSA_QS3000 0x3000 769 769 770 + #define PCI_VENDOR_ID_STMICRO 0x104A 771 + 770 772 #define PCI_VENDOR_ID_BUSLOGIC 0x104B 771 773 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 772 774 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 ··· 1253 1251 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348 1254 1252 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C 1255 1253 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E 1254 + #define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360 1255 + #define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364 1256 1256 #define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 1257 1257 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 1258 1258 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB ··· 2462 2458 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 2463 2459 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 2464 2460 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 2465 - #define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22 2466 - #define PCI_DEVICE_ID_INTEL_CPT_LPC_MIN 0x1c41 2467 - #define PCI_DEVICE_ID_INTEL_CPT_LPC_MAX 0x1c5f 2461 + #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 2462 + #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41 2463 + #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f 2464 + #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC 0x1d40 2468 2465 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 2469 2466 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 2470 2467 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413 ··· 2674 2669 #define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a 2675 2670 #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 2676 2671 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 2677 - #define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00 2678 - #define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX 0x3b1f 2679 - #define PCI_DEVICE_ID_INTEL_PCH_SMBUS 0x3b30 2672 + #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00 2673 + #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f 2674 + #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 2680 2675 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f 2681 2676 #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 2682 2677 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 ··· 2685 2680 #define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 2686 2681 #define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 2687 2682 #define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff 2688 - #define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031 2689 - #define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032 2683 + #define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031 2684 + #define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032 2690 2685 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 2691 2686 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 2692 2687 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+4 -2
include/linux/pci_regs.h
··· 300 300 #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ 301 301 #define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ 302 302 303 - /* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */ 303 + /* MSI-X registers */ 304 304 #define PCI_MSIX_FLAGS 2 305 305 #define PCI_MSIX_FLAGS_QSIZE 0x7FF 306 306 #define PCI_MSIX_FLAGS_ENABLE (1 << 15) 307 307 #define PCI_MSIX_FLAGS_MASKALL (1 << 14) 308 - #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) 308 + #define PCI_MSIX_TABLE 4 309 + #define PCI_MSIX_PBA 8 310 + #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) 309 311 310 312 /* CompactPCI Hotswap Register */ 311 313
+135 -16
kernel/resource.c
··· 40 40 41 41 static DEFINE_RWLOCK(resource_lock); 42 42 43 + /* 44 + * By default, we allocate free space bottom-up. The architecture can request 45 + * top-down by clearing this flag. The user can override the architecture's 46 + * choice with the "resource_alloc_from_bottom" kernel boot option, but that 47 + * should only be a debugging tool. 48 + */ 49 + int resource_alloc_from_bottom = 1; 50 + 51 + static __init int setup_alloc_from_bottom(char *s) 52 + { 53 + printk(KERN_INFO 54 + "resource: allocating from bottom-up; please report a bug\n"); 55 + resource_alloc_from_bottom = 1; 56 + return 0; 57 + } 58 + early_param("resource_alloc_from_bottom", setup_alloc_from_bottom); 59 + 43 60 static void *r_next(struct seq_file *m, void *v, loff_t *pos) 44 61 { 45 62 struct resource *p = v; ··· 374 357 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; 375 358 } 376 359 360 + static resource_size_t simple_align_resource(void *data, 361 + const struct resource *avail, 362 + resource_size_t size, 363 + resource_size_t align) 364 + { 365 + return avail->start; 366 + } 367 + 368 + static void resource_clip(struct resource *res, resource_size_t min, 369 + resource_size_t max) 370 + { 371 + if (res->start < min) 372 + res->start = min; 373 + if (res->end > max) 374 + res->end = max; 375 + } 376 + 377 + static bool resource_contains(struct resource *res1, struct resource *res2) 378 + { 379 + return res1->start <= res2->start && res1->end >= res2->end; 380 + } 381 + 382 + /* 383 + * Find the resource before "child" in the sibling list of "root" children. 384 + */ 385 + static struct resource *find_sibling_prev(struct resource *root, struct resource *child) 386 + { 387 + struct resource *this; 388 + 389 + for (this = root->child; this; this = this->sibling) 390 + if (this->sibling == child) 391 + return this; 392 + 393 + return NULL; 394 + } 395 + 377 396 /* 378 397 * Find empty slot in the resource tree given range and alignment. 398 + * This version allocates from the end of the root resource first. 399 + */ 400 + static int find_resource_from_top(struct resource *root, struct resource *new, 401 + resource_size_t size, resource_size_t min, 402 + resource_size_t max, resource_size_t align, 403 + resource_size_t (*alignf)(void *, 404 + const struct resource *, 405 + resource_size_t, 406 + resource_size_t), 407 + void *alignf_data) 408 + { 409 + struct resource *this; 410 + struct resource tmp, avail, alloc; 411 + 412 + tmp.start = root->end; 413 + tmp.end = root->end; 414 + 415 + this = find_sibling_prev(root, NULL); 416 + for (;;) { 417 + if (this) { 418 + if (this->end < root->end) 419 + tmp.start = this->end + 1; 420 + } else 421 + tmp.start = root->start; 422 + 423 + resource_clip(&tmp, min, max); 424 + 425 + /* Check for overflow after ALIGN() */ 426 + avail = *new; 427 + avail.start = ALIGN(tmp.start, align); 428 + avail.end = tmp.end; 429 + if (avail.start >= tmp.start) { 430 + alloc.start = alignf(alignf_data, &avail, size, align); 431 + alloc.end = alloc.start + size - 1; 432 + if (resource_contains(&avail, &alloc)) { 433 + new->start = alloc.start; 434 + new->end = alloc.end; 435 + return 0; 436 + } 437 + } 438 + 439 + if (!this || this->start == root->start) 440 + break; 441 + 442 + tmp.end = this->start - 1; 443 + this = find_sibling_prev(root, this); 444 + } 445 + return -EBUSY; 446 + } 447 + 448 + /* 449 + * Find empty slot in the resource tree given range and alignment. 450 + * This version allocates from the beginning of the root resource first. 379 451 */ 380 452 static int find_resource(struct resource *root, struct resource *new, 381 453 resource_size_t size, resource_size_t min, ··· 476 370 void *alignf_data) 477 371 { 478 372 struct resource *this = root->child; 479 - struct resource tmp = *new; 373 + struct resource tmp = *new, avail, alloc; 480 374 481 375 tmp.start = root->start; 482 376 /* 483 - * Skip past an allocated resource that starts at 0, since the assignment 484 - * of this->start - 1 to tmp->end below would cause an underflow. 377 + * Skip past an allocated resource that starts at 0, since the 378 + * assignment of this->start - 1 to tmp->end below would cause an 379 + * underflow. 485 380 */ 486 381 if (this && this->start == 0) { 487 382 tmp.start = this->end + 1; 488 383 this = this->sibling; 489 384 } 490 - for(;;) { 385 + for (;;) { 491 386 if (this) 492 387 tmp.end = this->start - 1; 493 388 else 494 389 tmp.end = root->end; 495 - if (tmp.start < min) 496 - tmp.start = min; 497 - if (tmp.end > max) 498 - tmp.end = max; 499 - tmp.start = ALIGN(tmp.start, align); 500 - if (alignf) 501 - tmp.start = alignf(alignf_data, &tmp, size, align); 502 - if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) { 503 - new->start = tmp.start; 504 - new->end = tmp.start + size - 1; 505 - return 0; 390 + 391 + resource_clip(&tmp, min, max); 392 + 393 + /* Check for overflow after ALIGN() */ 394 + avail = *new; 395 + avail.start = ALIGN(tmp.start, align); 396 + avail.end = tmp.end; 397 + if (avail.start >= tmp.start) { 398 + alloc.start = alignf(alignf_data, &avail, size, align); 399 + alloc.end = alloc.start + size - 1; 400 + if (resource_contains(&avail, &alloc)) { 401 + new->start = alloc.start; 402 + new->end = alloc.end; 403 + return 0; 404 + } 405 + } 506 405 } 406 + 507 407 if (!this) 508 408 break; 409 + 509 410 tmp.start = this->end + 1; 510 411 this = this->sibling; 511 412 } ··· 541 428 { 542 429 int err; 543 430 431 + if (!alignf) 432 + alignf = simple_align_resource; 433 + 544 434 write_lock(&resource_lock); 545 - err = find_resource(root, new, size, min, max, align, alignf, alignf_data); 435 + if (resource_alloc_from_bottom) 436 + err = find_resource(root, new, size, min, max, align, alignf, alignf_data); 437 + else 438 + err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data); 546 439 if (err >= 0 && __request_resource(root, new)) 547 440 err = -EBUSY; 548 441 write_unlock(&resource_lock);