Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (98 commits)
PCI PM: Put PM callbacks in the order of execution
PCI PM: Run default PM callbacks for all devices using new framework
PCI PM: Register power state of devices during initialization
PCI PM: Call pci_fixup_device from legacy routines
PCI PM: Rearrange code in pci-driver.c
PCI PM: Avoid touching devices behind bridges in unknown state
PCI PM: Move pci_has_legacy_pm_support
PCI PM: Power-manage devices without drivers during suspend-resume
PCI PM: Add suspend counterpart of pci_reenable_device
PCI PM: Fix poweroff and restore callbacks
PCI: Use msleep instead of cpu_relax during ASPM link retraining
PCI: PCIe portdrv: Add kerneldoc comments to remaining core functions
PCI: PCIe portdrv: Rearrange code so that related things are together
PCI: PCIe portdrv: Fix suspend and resume of PCI Express port services
PCI: PCIe portdrv: Add kerneldoc comments to some core functions
x86/PCI: Do not use interrupt links for devices using MSI-X
net: sfc: Use pci_clear_master() to disable bus mastering
PCI: Add pci_clear_master() as opposite of pci_set_master()
PCI hotplug: remove redundant test in cpq hotplug
PCI: pciehp: cleanup register and field definitions
...

+2193 -1375
+2 -1
Documentation/PCI/pci.txt
··· 294 294 295 295 pci_set_master() will enable DMA by setting the bus master bit 296 296 in the PCI_COMMAND register. It also fixes the latency timer value if 297 - it's set to something bogus by the BIOS. 297 + it's set to something bogus by the BIOS. pci_clear_master() will 298 + disable DMA by clearing the bus master bit. 298 299 299 300 If the PCI device can use the PCI Memory-Write-Invalidate transaction, 300 301 call pci_set_mwi(). This enables the PCI_COMMAND bit for Mem-Wr-Inval
+4
Documentation/kernel-parameters.txt
··· 919 919 920 920 inttest= [IA64] 921 921 922 + iomem= Disable strict checking of access to MMIO memory 923 + strict regions from userspace. 924 + relaxed 925 + 922 926 iommu= [x86] 923 927 off 924 928 force
-18
arch/alpha/kernel/pci.c
··· 320 320 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); 321 321 } 322 322 323 - /* Most Alphas have straight-forward swizzling needs. */ 324 - 325 - u8 __init 326 - common_swizzle(struct pci_dev *dev, u8 *pinp) 327 - { 328 - u8 pin = *pinp; 329 - 330 - while (dev->bus->parent) { 331 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 332 - /* Move up the chain of bridges. */ 333 - dev = dev->bus->self; 334 - } 335 - *pinp = pin; 336 - 337 - /* The slot is the slot of the last bridge. */ 338 - return PCI_SLOT(dev->devfn); 339 - } 340 - 341 323 void 342 324 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 343 325 struct resource *res)
+4 -9
arch/alpha/kernel/pci_impl.h
··· 106 106 * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A. 107 107 * Thus, each swizzle is ((pin-1) + (device#-4)) % 4 108 108 * 109 - * The following code swizzles for exactly one bridge. The routine 110 - * common_swizzle below handles multiple bridges. But there are a 111 - * couple boards that do strange things, so we define this here. 109 + * pci_swizzle_interrupt_pin() swizzles for exactly one bridge. The routine 110 + * pci_common_swizzle() handles multiple bridges. But there are a 111 + * couple boards that do strange things. 112 112 */ 113 - 114 - static inline u8 bridge_swizzle(u8 pin, u8 slot) 115 - { 116 - return (((pin-1) + slot) % 4) + 1; 117 - } 118 113 119 114 120 115 /* The following macro is used to implement the table-based irq mapping ··· 179 184 extern unsigned long alpha_agpgart_size; 180 185 181 186 extern void common_init_pci(void); 182 - extern u8 common_swizzle(struct pci_dev *, u8 *); 187 + #define common_swizzle pci_common_swizzle 183 188 extern struct pci_controller *alloc_pci_controller(void); 184 189 extern struct resource *alloc_resource(void); 185 190
+1 -1
arch/alpha/kernel/sys_dp264.c
··· 481 481 slot = PCI_SLOT(dev->devfn); 482 482 break; 483 483 } 484 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ; 484 + pin = pci_swizzle_interrupt_pin(dev, pin); 485 485 486 486 /* Move up the chain of bridges. */ 487 487 dev = dev->bus->self;
+1 -1
arch/alpha/kernel/sys_eiger.c
··· 204 204 break; 205 205 } 206 206 /* Must be a card-based bridge. */ 207 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 207 + pin = pci_swizzle_interrupt_pin(dev, pin); 208 208 209 209 /* Move up the chain of bridges. */ 210 210 dev = dev->bus->self;
+1 -1
arch/alpha/kernel/sys_miata.c
··· 219 219 slot = PCI_SLOT(dev->devfn) + 9; 220 220 break; 221 221 } 222 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 222 + pin = pci_swizzle_interrupt_pin(dev, pin); 223 223 224 224 /* Move up the chain of bridges. */ 225 225 dev = dev->bus->self;
+1 -1
arch/alpha/kernel/sys_noritake.c
··· 257 257 slot = PCI_SLOT(dev->devfn) + 15; 258 258 break; 259 259 } 260 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ; 260 + pin = pci_swizzle_interrupt_pin(dev, pin); 261 261 262 262 /* Move up the chain of bridges. */ 263 263 dev = dev->bus->self;
+1 -1
arch/alpha/kernel/sys_ruffian.c
··· 160 160 slot = PCI_SLOT(dev->devfn) + 10; 161 161 break; 162 162 } 163 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 163 + pin = pci_swizzle_interrupt_pin(dev, pin); 164 164 165 165 /* Move up the chain of bridges. */ 166 166 dev = dev->bus->self;
+1 -1
arch/alpha/kernel/sys_sable.c
··· 425 425 slot = PCI_SLOT(dev->devfn) + 11; 426 426 break; 427 427 } 428 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ; 428 + pin = pci_swizzle_interrupt_pin(dev, pin); 429 429 430 430 /* Move up the chain of bridges. */ 431 431 dev = dev->bus->self;
+1 -1
arch/arm/include/asm/mach/pci.h
··· 42 42 /* 43 43 * This is the standard PCI-PCI bridge swizzling algorithm. 44 44 */ 45 - u8 pci_std_swizzle(struct pci_dev *dev, u8 *pinp); 45 + #define pci_std_swizzle pci_common_swizzle 46 46 47 47 /* 48 48 * Call this with your hw_pci struct to initialise the PCI system.
-27
arch/arm/kernel/bios32.c
··· 480 480 #endif 481 481 482 482 /* 483 - * This is the standard PCI-PCI bridge swizzling algorithm: 484 - * 485 - * Dev: 0 1 2 3 486 - * A A B C D 487 - * B B C D A 488 - * C C D A B 489 - * D D A B C 490 - * ^^^^^^^^^^ irq pin on bridge 491 - */ 492 - u8 __devinit pci_std_swizzle(struct pci_dev *dev, u8 *pinp) 493 - { 494 - int pin = *pinp - 1; 495 - 496 - while (dev->bus->self) { 497 - pin = (pin + PCI_SLOT(dev->devfn)) & 3; 498 - /* 499 - * move up the chain of bridges, 500 - * swizzling as we go. 501 - */ 502 - dev = dev->bus->self; 503 - } 504 - *pinp = pin + 1; 505 - 506 - return PCI_SLOT(dev->devfn); 507 - } 508 - 509 - /* 510 483 * Swizzle the device pin each time we cross a bridge. 511 484 * This might update pin and returns the slot number. 512 485 */
+2 -9
arch/arm/mach-integrator/pci.c
··· 63 63 * 64 64 * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A. 65 65 * Thus, each swizzle is ((pin-1) + (device#-4)) % 4 66 - * 67 - * The following code swizzles for exactly one bridge. 68 66 */ 69 - static inline int bridge_swizzle(int pin, unsigned int slot) 70 - { 71 - return (pin + slot) & 3; 72 - } 73 67 74 68 /* 75 69 * This routine handles multiple bridges. ··· 75 81 if (pin == 0) 76 82 pin = 1; 77 83 78 - pin -= 1; 79 84 while (dev->bus->self) { 80 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 85 + pin = pci_swizzle_interrupt_pin(dev, pin); 81 86 /* 82 87 * move up the chain of bridges, swizzling as we go. 83 88 */ 84 89 dev = dev->bus->self; 85 90 } 86 - *pinp = pin + 1; 91 + *pinp = pin; 87 92 88 93 return PCI_SLOT(dev->devfn); 89 94 }
-6
arch/mips/pci/pci-ip27.c
··· 146 146 return 0; 147 147 } 148 148 149 - /* Most MIPS systems have straight-forward swizzling needs. */ 150 - static inline u8 bridge_swizzle(u8 pin, u8 slot) 151 - { 152 - return (((pin - 1) + slot) % 4) + 1; 153 - } 154 - 155 149 static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev) 156 150 { 157 151 while (dev->bus->parent) {
+1 -23
arch/mips/pci/pci.c
··· 149 149 "Skipping PCI bus scan due to resource conflict\n"); 150 150 } 151 151 152 - /* Most MIPS systems have straight-forward swizzling needs. */ 153 - 154 - static inline u8 bridge_swizzle(u8 pin, u8 slot) 155 - { 156 - return (((pin - 1) + slot) % 4) + 1; 157 - } 158 - 159 - static u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp) 160 - { 161 - u8 pin = *pinp; 162 - 163 - while (dev->bus->parent) { 164 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 165 - /* Move up the chain of bridges. */ 166 - dev = dev->bus->self; 167 - } 168 - *pinp = pin; 169 - 170 - /* The slot is the slot of the last bridge. */ 171 - return PCI_SLOT(dev->devfn); 172 - } 173 - 174 152 static int __init pcibios_init(void) 175 153 { 176 154 struct pci_controller *hose; ··· 157 179 for (hose = hose_head; hose; hose = hose->next) 158 180 pcibios_scanbus(hose); 159 181 160 - pci_fixup_irqs(common_swizzle, pcibios_map_irq); 182 + pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq); 161 183 162 184 pci_initialized = 1; 163 185
+1 -6
arch/powerpc/kernel/prom_parse.c
··· 232 232 } 233 233 EXPORT_SYMBOL_GPL(of_pci_address_to_resource); 234 234 235 - static u8 of_irq_pci_swizzle(u8 slot, u8 pin) 236 - { 237 - return (((pin - 1) + slot) % 4) + 1; 238 - } 239 - 240 235 int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) 241 236 { 242 237 struct device_node *dn, *ppnode; ··· 301 306 /* We can only get here if we hit a P2P bridge with no node, 302 307 * let's do standard swizzling and try again 303 308 */ 304 - lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec); 309 + lspec = pci_swizzle_interrupt_pin(pdev, lspec); 305 310 pdev = ppdev; 306 311 } 307 312
+2 -7
arch/sh/drivers/pci/ops-cayman.c
··· 5 5 #include <cpu/irq.h> 6 6 #include "pci-sh5.h" 7 7 8 - static inline u8 bridge_swizzle(u8 pin, u8 slot) 9 - { 10 - return (((pin - 1) + slot) % 4) + 1; 11 - } 12 - 13 8 int __init pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin) 14 9 { 15 10 int result = -1; ··· 37 42 while (dev->bus->number > 0) { 38 43 39 44 slot = path[i].slot = PCI_SLOT(dev->devfn); 40 - pin = path[i].pin = bridge_swizzle(pin, slot); 45 + pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin); 41 46 dev = dev->bus->self; 42 47 i++; 43 48 if (i > 3) panic("PCI path to root bus too long!\n"); ··· 51 56 if ((slot < 3) || (i == 0)) { 52 57 /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final 53 58 swizzle now. */ 54 - result = IRQ_INTA + bridge_swizzle(pin, slot) - 1; 59 + result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1; 55 60 } else { 56 61 i--; 57 62 slot = path[i].slot;
+1 -21
arch/sh/drivers/pci/pci.c
··· 21 21 #include <linux/init.h> 22 22 #include <asm/io.h> 23 23 24 - static inline u8 bridge_swizzle(u8 pin, u8 slot) 25 - { 26 - return (((pin - 1) + slot) % 4) + 1; 27 - } 28 - 29 - static u8 __init simple_swizzle(struct pci_dev *dev, u8 *pinp) 30 - { 31 - u8 pin = *pinp; 32 - 33 - while (dev->bus->parent) { 34 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 35 - /* Move up the chain of bridges. */ 36 - dev = dev->bus->self; 37 - } 38 - *pinp = pin; 39 - 40 - /* The slot is the slot of the last bridge. */ 41 - return PCI_SLOT(dev->devfn); 42 - } 43 - 44 24 static int __init pcibios_init(void) 45 25 { 46 26 struct pci_channel *p; ··· 41 61 busno = bus->subordinate + 1; 42 62 } 43 63 44 - pci_fixup_irqs(simple_swizzle, pcibios_map_platform_irq); 64 + pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq); 45 65 46 66 return 0; 47 67 }
+1 -1
arch/x86/kernel/pci-dma.c
··· 38 38 be probably a smaller DMA mask, but this is bug-to-bug compatible 39 39 to older i386. */ 40 40 struct device x86_dma_fallback_dev = { 41 - .bus_id = "fallback device", 41 + .init_name = "fallback device", 42 42 .coherent_dma_mask = DMA_32BIT_MASK, 43 43 .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, 44 44 };
+2
arch/x86/mm/init_32.c
··· 328 328 { 329 329 if (pagenr <= 256) 330 330 return 1; 331 + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) 332 + return 0; 331 333 if (!page_is_ram(pagenr)) 332 334 return 1; 333 335 return 0;
+2
arch/x86/mm/init_64.c
··· 888 888 { 889 889 if (pagenr <= 256) 890 890 return 1; 891 + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) 892 + return 0; 891 893 if (!page_is_ram(pagenr)) 892 894 return 1; 893 895 return 0;
+3 -4
arch/x86/pci/acpi.c
··· 210 210 if (bus && node != -1) { 211 211 #ifdef CONFIG_ACPI_NUMA 212 212 if (pxm >= 0) 213 - printk(KERN_DEBUG "bus %02x -> pxm %d -> node %d\n", 214 - busnum, pxm, node); 213 + dev_printk(KERN_DEBUG, &bus->dev, 214 + "on NUMA node %d (pxm %d)\n", node, pxm); 215 215 #else 216 - printk(KERN_DEBUG "bus %02x -> node %d\n", 217 - busnum, node); 216 + dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node); 218 217 #endif 219 218 } 220 219
+10 -2
arch/x86/pci/common.c
··· 551 551 if ((err = pci_enable_resources(dev, mask)) < 0) 552 552 return err; 553 553 554 - if (!dev->msi_enabled) 554 + if (!pci_dev_msi_enabled(dev)) 555 555 return pcibios_enable_irq(dev); 556 556 return 0; 557 557 } 558 558 559 559 void pcibios_disable_device (struct pci_dev *dev) 560 560 { 561 - if (!dev->msi_enabled && pcibios_disable_irq) 561 + if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) 562 562 pcibios_disable_irq(dev); 563 + } 564 + 565 + int pci_ext_cfg_avail(struct pci_dev *dev) 566 + { 567 + if (raw_pci_ext_ops) 568 + return 1; 569 + else 570 + return 0; 563 571 } 564 572 565 573 struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
+2 -2
arch/x86/pci/i386.c
··· 129 129 pr = pci_find_parent_resource(dev, r); 130 130 if (!r->start || !pr || 131 131 request_resource(pr, r) < 0) { 132 - dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); 132 + dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); 133 133 /* 134 134 * Something is wrong with the region. 135 135 * Invalidate the resource to prevent ··· 170 170 r->flags, disabled, pass); 171 171 pr = pci_find_parent_resource(dev, r); 172 172 if (!pr || request_resource(pr, r) < 0) { 173 - dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); 173 + dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); 174 174 /* We'll assign a new address later */ 175 175 r->end -= r->start; 176 176 r->start = 0;
+2 -1
arch/x86/pci/init.c
··· 12 12 type = pci_direct_probe(); 13 13 #endif 14 14 15 - pci_mmcfg_early_init(); 15 + if (!(pci_probe & PCI_PROBE_NOEARLY)) 16 + pci_mmcfg_early_init(); 16 17 17 18 #ifdef CONFIG_PCI_OLPC 18 19 if (!pci_olpc_init())
+25 -29
arch/x86/pci/irq.c
··· 533 533 { 534 534 struct pci_dev *bridge; 535 535 int pin = pci_get_interrupt_pin(dev, &bridge); 536 - return pcibios_set_irq_routing(bridge, pin, irq); 536 + return pcibios_set_irq_routing(bridge, pin - 1, irq); 537 537 } 538 538 539 539 #endif ··· 887 887 dev_dbg(&dev->dev, "no interrupt pin\n"); 888 888 return 0; 889 889 } 890 - pin = pin - 1; 891 890 892 891 /* Find IRQ routing entry */ 893 892 ··· 896 897 info = pirq_get_info(dev); 897 898 if (!info) { 898 899 dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", 899 - 'A' + pin); 900 + 'A' + pin - 1); 900 901 return 0; 901 902 } 902 - pirq = info->irq[pin].link; 903 - mask = info->irq[pin].bitmap; 903 + pirq = info->irq[pin - 1].link; 904 + mask = info->irq[pin - 1].bitmap; 904 905 if (!pirq) { 905 - dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin); 906 + dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1); 906 907 return 0; 907 908 } 908 909 dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", 909 - 'A' + pin, pirq, mask, pirq_table->exclusive_irqs); 910 + 'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs); 910 911 mask &= pcibios_irq_mask; 911 912 912 913 /* Work around broken HP Pavilion Notebooks which assign USB to ··· 948 949 newirq = i; 949 950 } 950 951 } 951 - dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq); 952 + dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq); 952 953 953 954 /* Check if it is hardcoded */ 954 955 if ((pirq & 0xf0) == 0xf0) { ··· 976 977 return 0; 977 978 } 978 979 } 979 - dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq); 980 + dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq); 980 981 981 982 /* Update IRQ for all devices with the same pirq value */ 982 983 while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { 983 984 pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); 984 985 if (!pin) 985 986 continue; 986 - pin--; 987 + 987 988 
info = pirq_get_info(dev2); 988 989 if (!info) 989 990 continue; 990 - if (info->irq[pin].link == pirq) { 991 + if (info->irq[pin - 1].link == pirq) { 991 992 /* 992 993 * We refuse to override the dev->irq 993 994 * information. Give a warning! ··· 1041 1042 dev = NULL; 1042 1043 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 1043 1044 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 1045 + if (!pin) 1046 + continue; 1047 + 1044 1048 #ifdef CONFIG_X86_IO_APIC 1045 1049 /* 1046 1050 * Recalculate IRQ numbers if we use the I/O APIC. ··· 1051 1049 if (io_apic_assign_pci_irqs) { 1052 1050 int irq; 1053 1051 1054 - if (!pin) 1055 - continue; 1056 - 1057 1052 /* 1058 1053 * interrupt pins are numbered starting from 1 1059 1054 */ 1060 - pin--; 1061 1055 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, 1062 - PCI_SLOT(dev->devfn), pin); 1056 + PCI_SLOT(dev->devfn), pin - 1); 1063 1057 /* 1064 1058 * Busses behind bridges are typically not listed in the 1065 1059 * MP-table. In this case we have to look up the IRQ ··· 1068 1070 struct pci_dev *bridge = dev->bus->self; 1069 1071 int bus; 1070 1072 1071 - pin = (pin + PCI_SLOT(dev->devfn)) % 4; 1073 + pin = pci_swizzle_interrupt_pin(dev, pin); 1072 1074 bus = bridge->bus->number; 1073 1075 irq = IO_APIC_get_PCI_irq_vector(bus, 1074 - PCI_SLOT(bridge->devfn), pin); 1076 + PCI_SLOT(bridge->devfn), pin - 1); 1075 1077 if (irq >= 0) 1076 1078 dev_warn(&dev->dev, 1077 1079 "using bridge %s INT %c to " 1078 1080 "get IRQ %d\n", 1079 1081 pci_name(bridge), 1080 - 'A' + pin, irq); 1082 + 'A' + pin - 1, irq); 1081 1083 } 1082 1084 if (irq >= 0) { 1083 1085 dev_info(&dev->dev, 1084 1086 "PCI->APIC IRQ transform: INT %c " 1085 1087 "-> IRQ %d\n", 1086 - 'A' + pin, irq); 1088 + 'A' + pin - 1, irq); 1087 1089 dev->irq = irq; 1088 1090 } 1089 1091 } ··· 1091 1093 /* 1092 1094 * Still no IRQ? Try to lookup one... 
1093 1095 */ 1094 - if (pin && !dev->irq) 1096 + if (!dev->irq) 1095 1097 pcibios_lookup_irq(dev, 0); 1096 1098 } 1097 1099 } ··· 1218 1220 if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { 1219 1221 char *msg = ""; 1220 1222 1221 - pin--; /* interrupt pins are numbered starting from 1 */ 1222 - 1223 1223 if (io_apic_assign_pci_irqs) { 1224 1224 int irq; 1225 1225 1226 - irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin); 1226 + irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1); 1227 1227 /* 1228 1228 * Busses behind bridges are typically not listed in the MP-table. 1229 1229 * In this case we have to look up the IRQ based on the parent bus, ··· 1232 1236 while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ 1233 1237 struct pci_dev *bridge = dev->bus->self; 1234 1238 1235 - pin = (pin + PCI_SLOT(dev->devfn)) % 4; 1239 + pin = pci_swizzle_interrupt_pin(dev, pin); 1236 1240 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 1237 - PCI_SLOT(bridge->devfn), pin); 1241 + PCI_SLOT(bridge->devfn), pin - 1); 1238 1242 if (irq >= 0) 1239 1243 dev_warn(&dev->dev, "using bridge %s " 1240 1244 "INT %c to get IRQ %d\n", 1241 - pci_name(bridge), 'A' + pin, 1245 + pci_name(bridge), 'A' + pin - 1, 1242 1246 irq); 1243 1247 dev = bridge; 1244 1248 } 1245 1249 dev = temp_dev; 1246 1250 if (irq >= 0) { 1247 1251 dev_info(&dev->dev, "PCI->APIC IRQ transform: " 1248 - "INT %c -> IRQ %d\n", 'A' + pin, irq); 1252 + "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); 1249 1253 dev->irq = irq; 1250 1254 return 0; 1251 1255 } else ··· 1264 1268 return 0; 1265 1269 1266 1270 dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", 1267 - 'A' + pin, msg); 1271 + 'A' + pin - 1, msg); 1268 1272 } 1269 1273 return 0; 1270 1274 }
+1 -19
arch/x86/pci/visws.c
··· 24 24 25 25 unsigned int pci_bus0, pci_bus1; 26 26 27 - static inline u8 bridge_swizzle(u8 pin, u8 slot) 28 - { 29 - return (((pin - 1) + slot) % 4) + 1; 30 - } 31 - 32 - static u8 __init visws_swizzle(struct pci_dev *dev, u8 *pinp) 33 - { 34 - u8 pin = *pinp; 35 - 36 - while (dev->bus->self) { /* Move up the chain of bridges. */ 37 - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); 38 - dev = dev->bus->self; 39 - } 40 - *pinp = pin; 41 - 42 - return PCI_SLOT(dev->devfn); 43 - } 44 - 45 27 static int __init visws_map_irq(struct pci_dev *dev, u8 slot, u8 pin) 46 28 { 47 29 int irq, bus = dev->bus->number; ··· 88 106 raw_pci_ops = &pci_direct_conf1; 89 107 pci_scan_bus_with_sysdata(pci_bus0); 90 108 pci_scan_bus_with_sysdata(pci_bus1); 91 - pci_fixup_irqs(visws_swizzle, visws_map_irq); 109 + pci_fixup_irqs(pci_common_swizzle, visws_map_irq); 92 110 pcibios_resource_survey(); 93 111 return 0; 94 112 }
+20
drivers/acpi/pci_root.c
··· 31 31 #include <linux/spinlock.h> 32 32 #include <linux/pm.h> 33 33 #include <linux/pci.h> 34 + #include <linux/pci-acpi.h> 34 35 #include <linux/acpi.h> 35 36 #include <acpi/acpi_bus.h> 36 37 #include <acpi/acpi_drivers.h> ··· 194 193 unsigned long long value = 0; 195 194 acpi_handle handle = NULL; 196 195 struct acpi_device *child; 196 + u32 flags, base_flags; 197 197 198 198 199 199 if (!device) ··· 211 209 device->driver_data = root; 212 210 213 211 device->ops.bind = acpi_pci_bind; 212 + 213 + /* 214 + * All supported architectures that use ACPI have support for 215 + * PCI domains, so we indicate this in _OSC support capabilities. 216 + */ 217 + flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; 218 + pci_acpi_osc_support(device->handle, flags); 214 219 215 220 /* 216 221 * Segment ··· 343 334 */ 344 335 list_for_each_entry(child, &device->children, node) 345 336 acpi_pci_bridge_scan(child); 337 + 338 + /* Indicate support for various _OSC capabilities. */ 339 + if (pci_ext_cfg_avail(root->bus->self)) 340 + flags |= OSC_EXT_PCI_CONFIG_SUPPORT; 341 + if (pcie_aspm_enabled()) 342 + flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | 343 + OSC_CLOCK_PWR_CAPABILITY_SUPPORT; 344 + if (pci_msi_enabled()) 345 + flags |= OSC_MSI_SUPPORT; 346 + if (flags != base_flags) 347 + pci_acpi_osc_support(device->handle, flags); 346 348 347 349 end: 348 350 if (result) {
+1 -1
drivers/net/e1000e/netdev.c
··· 4807 4807 } 4808 4808 } 4809 4809 4810 - err = pci_request_selected_regions(pdev, 4810 + err = pci_request_selected_regions_exclusive(pdev, 4811 4811 pci_select_bars(pdev, IORESOURCE_MEM), 4812 4812 e1000e_driver_name); 4813 4813 if (err)
+2 -2
drivers/net/sfc/falcon.c
··· 1403 1403 } 1404 1404 1405 1405 /* Disable both devices */ 1406 - pci_disable_device(efx->pci_dev); 1406 + pci_clear_master(efx->pci_dev); 1407 1407 if (FALCON_IS_DUAL_FUNC(efx)) 1408 - pci_disable_device(nic_data->pci_dev2); 1408 + pci_clear_master(nic_data->pci_dev2); 1409 1409 falcon_disable_interrupts(efx); 1410 1410 1411 1411 if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
+2 -2
drivers/parisc/dino.c
··· 547 547 ** The additional "-1" adjusts for skewing the IRQ<->slot. 548 548 */ 549 549 dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin); 550 - dev->irq = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ; 550 + dev->irq = pci_swizzle_interrupt_pin(dev, irq_pin) - 1; 551 551 552 552 /* Shouldn't really need to do this but it's in case someone tries 553 553 ** to bypass PCI services and look at the card themselves. ··· 672 672 673 673 dino_cfg_read(dev->bus, dev->devfn, 674 674 PCI_INTERRUPT_PIN, 1, &irq_pin); 675 - irq_pin = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ; 675 + irq_pin = pci_swizzle_interrupt_pin(dev, irq_pin) - 1; 676 676 printk(KERN_WARNING "Device %s has undefined IRQ, " 677 677 "setting to %d\n", pci_name(dev), irq_pin); 678 678 dino_cfg_write(dev->bus, dev->devfn,
+1 -2
drivers/parisc/iosapic.c
··· 519 519 ** 520 520 ** Advantage is it's really easy to implement. 521 521 */ 522 - intr_pin = ((intr_pin-1)+PCI_SLOT(pcidev->devfn)) % 4; 523 - intr_pin++; /* convert back to INTA-D (1-4) */ 522 + intr_pin = pci_swizzle_interrupt_pin(pcidev, intr_pin); 524 523 #endif /* PCI_BRIDGE_FUNCS */ 525 524 526 525 /*
+9
drivers/pci/Kconfig
··· 42 42 43 43 When in doubt, say N. 44 44 45 + config PCI_STUB 46 + tristate "PCI Stub driver" 47 + depends on PCI 48 + help 49 + Say Y or M here if you want be able to reserve a PCI device 50 + when it is going to be assigned to a guest operating system. 51 + 52 + When in doubt, say N. 53 + 45 54 config HT_IRQ 46 55 bool "Interrupts on hypertransport devices" 47 56 default y
+2
drivers/pci/Makefile
··· 53 53 54 54 obj-$(CONFIG_PCI_SYSCALL) += syscall.o 55 55 56 + obj-$(CONFIG_PCI_STUB) += pci-stub.o 57 + 56 58 ifeq ($(CONFIG_PCI_DEBUG),y) 57 59 EXTRA_CFLAGS += -DDEBUG 58 60 endif
+150 -74
drivers/pci/access.c
··· 66 66 EXPORT_SYMBOL(pci_bus_write_config_word); 67 67 EXPORT_SYMBOL(pci_bus_write_config_dword); 68 68 69 + 70 + /** 71 + * pci_read_vpd - Read one entry from Vital Product Data 72 + * @dev: pci device struct 73 + * @pos: offset in vpd space 74 + * @count: number of bytes to read 75 + * @buf: pointer to where to store result 76 + * 77 + */ 78 + ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf) 79 + { 80 + if (!dev->vpd || !dev->vpd->ops) 81 + return -ENODEV; 82 + return dev->vpd->ops->read(dev, pos, count, buf); 83 + } 84 + EXPORT_SYMBOL(pci_read_vpd); 85 + 86 + /** 87 + * pci_write_vpd - Write entry to Vital Product Data 88 + * @dev: pci device struct 89 + * @pos: offset in vpd space 90 + * @count: number of bytes to read 91 + * @val: value to write 92 + * 93 + */ 94 + ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf) 95 + { 96 + if (!dev->vpd || !dev->vpd->ops) 97 + return -ENODEV; 98 + return dev->vpd->ops->write(dev, pos, count, buf); 99 + } 100 + EXPORT_SYMBOL(pci_write_vpd); 101 + 69 102 /* 70 103 * The following routines are to prevent the user from accessing PCI config 71 104 * space when it's unsafe to do so. Some devices require this during BIST and ··· 166 133 167 134 struct pci_vpd_pci22 { 168 135 struct pci_vpd base; 169 - spinlock_t lock; /* controls access to hardware and the flags */ 170 - u8 cap; 136 + struct mutex lock; 137 + u16 flag; 171 138 bool busy; 172 - bool flag; /* value of F bit to wait for */ 139 + u8 cap; 173 140 }; 174 141 175 - /* Wait for last operation to complete */ 142 + /* 143 + * Wait for last operation to complete. 144 + * This code has to spin since there is no other notification from the PCI 145 + * hardware. Since the VPD is often implemented by serial attachment to an 146 + * EEPROM, it may take many milliseconds to complete. 
147 + */ 176 148 static int pci_vpd_pci22_wait(struct pci_dev *dev) 177 149 { 178 150 struct pci_vpd_pci22 *vpd = 179 151 container_of(dev->vpd, struct pci_vpd_pci22, base); 180 - u16 flag, status; 181 - int wait; 152 + unsigned long timeout = jiffies + HZ/20 + 2; 153 + u16 status; 182 154 int ret; 183 155 184 156 if (!vpd->busy) 185 157 return 0; 186 158 187 - flag = vpd->flag ? PCI_VPD_ADDR_F : 0; 188 - wait = vpd->flag ? 10 : 1000; /* read: 100 us; write: 10 ms */ 189 159 for (;;) { 190 - ret = pci_user_read_config_word(dev, 191 - vpd->cap + PCI_VPD_ADDR, 160 + ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR, 192 161 &status); 193 - if (ret < 0) 162 + if (ret) 194 163 return ret; 195 - if ((status & PCI_VPD_ADDR_F) == flag) { 164 + 165 + if ((status & PCI_VPD_ADDR_F) == vpd->flag) { 196 166 vpd->busy = false; 197 167 return 0; 198 168 } 199 - if (wait-- == 0) 169 + 170 + if (time_after(jiffies, timeout)) 200 171 return -ETIMEDOUT; 201 - udelay(10); 172 + if (fatal_signal_pending(current)) 173 + return -EINTR; 174 + if (!cond_resched()) 175 + udelay(10); 202 176 } 203 177 } 204 178 205 - static int pci_vpd_pci22_read(struct pci_dev *dev, int pos, int size, 206 - char *buf) 179 + static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count, 180 + void *arg) 207 181 { 208 182 struct pci_vpd_pci22 *vpd = 209 183 container_of(dev->vpd, struct pci_vpd_pci22, base); 210 - u32 val; 211 184 int ret; 212 - int begin, end, i; 185 + loff_t end = pos + count; 186 + u8 *buf = arg; 213 187 214 - if (pos < 0 || pos > vpd->base.len || size > vpd->base.len - pos) 188 + if (pos < 0 || pos > vpd->base.len || end > vpd->base.len) 215 189 return -EINVAL; 216 - if (size == 0) 217 - return 0; 218 190 219 - spin_lock_irq(&vpd->lock); 191 + if (mutex_lock_killable(&vpd->lock)) 192 + return -EINTR; 193 + 220 194 ret = pci_vpd_pci22_wait(dev); 221 195 if (ret < 0) 222 196 goto out; 223 - ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, 224 - 
pos & ~3); 225 - if (ret < 0) 226 - goto out; 227 - vpd->busy = true; 228 - vpd->flag = 1; 229 - ret = pci_vpd_pci22_wait(dev); 230 - if (ret < 0) 231 - goto out; 232 - ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, 233 - &val); 234 - out: 235 - spin_unlock_irq(&vpd->lock); 236 - if (ret < 0) 237 - return ret; 238 197 239 - /* Convert to bytes */ 240 - begin = pos & 3; 241 - end = min(4, begin + size); 242 - for (i = 0; i < end; ++i) { 243 - if (i >= begin) 244 - *buf++ = val; 245 - val >>= 8; 198 + while (pos < end) { 199 + u32 val; 200 + unsigned int i, skip; 201 + 202 + ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, 203 + pos & ~3); 204 + if (ret < 0) 205 + break; 206 + vpd->busy = true; 207 + vpd->flag = PCI_VPD_ADDR_F; 208 + ret = pci_vpd_pci22_wait(dev); 209 + if (ret < 0) 210 + break; 211 + 212 + ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val); 213 + if (ret < 0) 214 + break; 215 + 216 + skip = pos & 3; 217 + for (i = 0; i < sizeof(u32); i++) { 218 + if (i >= skip) { 219 + *buf++ = val; 220 + if (++pos == end) 221 + break; 222 + } 223 + val >>= 8; 224 + } 246 225 } 247 - return end - begin; 226 + out: 227 + mutex_unlock(&vpd->lock); 228 + return ret ? 
ret : count; 248 229 } 249 230 250 - static int pci_vpd_pci22_write(struct pci_dev *dev, int pos, int size, 251 - const char *buf) 231 + static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count, 232 + const void *arg) 252 233 { 253 234 struct pci_vpd_pci22 *vpd = 254 235 container_of(dev->vpd, struct pci_vpd_pci22, base); 255 - u32 val; 256 - int ret; 236 + const u8 *buf = arg; 237 + loff_t end = pos + count; 238 + int ret = 0; 257 239 258 - if (pos < 0 || pos > vpd->base.len || pos & 3 || 259 - size > vpd->base.len - pos || size < 4) 240 + if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len) 260 241 return -EINVAL; 261 242 262 - val = (u8) *buf++; 263 - val |= ((u8) *buf++) << 8; 264 - val |= ((u8) *buf++) << 16; 265 - val |= ((u32)(u8) *buf++) << 24; 243 + if (mutex_lock_killable(&vpd->lock)) 244 + return -EINTR; 266 245 267 - spin_lock_irq(&vpd->lock); 268 246 ret = pci_vpd_pci22_wait(dev); 269 247 if (ret < 0) 270 248 goto out; 271 - ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, 272 - val); 273 - if (ret < 0) 274 - goto out; 275 - ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, 276 - pos | PCI_VPD_ADDR_F); 277 - if (ret < 0) 278 - goto out; 279 - vpd->busy = true; 280 - vpd->flag = 0; 281 - ret = pci_vpd_pci22_wait(dev); 249 + 250 + while (pos < end) { 251 + u32 val; 252 + 253 + val = *buf++; 254 + val |= *buf++ << 8; 255 + val |= *buf++ << 16; 256 + val |= *buf++ << 24; 257 + 258 + ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val); 259 + if (ret < 0) 260 + break; 261 + ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, 262 + pos | PCI_VPD_ADDR_F); 263 + if (ret < 0) 264 + break; 265 + 266 + vpd->busy = true; 267 + vpd->flag = 0; 268 + ret = pci_vpd_pci22_wait(dev); 269 + 270 + pos += sizeof(u32); 271 + } 282 272 out: 283 - spin_unlock_irq(&vpd->lock); 284 - if (ret < 0) 285 - return ret; 286 - 287 - return 4; 273 + mutex_unlock(&vpd->lock); 274 + return ret ? 
ret : count; 288 275 } 289 276 290 277 static void pci_vpd_pci22_release(struct pci_dev *dev) ··· 312 259 kfree(container_of(dev->vpd, struct pci_vpd_pci22, base)); 313 260 } 314 261 315 - static struct pci_vpd_ops pci_vpd_pci22_ops = { 262 + static const struct pci_vpd_ops pci_vpd_pci22_ops = { 316 263 .read = pci_vpd_pci22_read, 317 264 .write = pci_vpd_pci22_write, 318 265 .release = pci_vpd_pci22_release, ··· 332 279 333 280 vpd->base.len = PCI_VPD_PCI22_SIZE; 334 281 vpd->base.ops = &pci_vpd_pci22_ops; 335 - spin_lock_init(&vpd->lock); 282 + mutex_init(&vpd->lock); 336 283 vpd->cap = cap; 337 284 vpd->busy = false; 338 285 dev->vpd = &vpd->base; 339 286 return 0; 340 287 } 288 + 289 + /** 290 + * pci_vpd_truncate - Set available Vital Product Data size 291 + * @dev: pci device struct 292 + * @size: available memory in bytes 293 + * 294 + * Adjust size of available VPD area. 295 + */ 296 + int pci_vpd_truncate(struct pci_dev *dev, size_t size) 297 + { 298 + if (!dev->vpd) 299 + return -EINVAL; 300 + 301 + /* limited by the access method */ 302 + if (size > dev->vpd->len) 303 + return -EINVAL; 304 + 305 + dev->vpd->len = size; 306 + dev->vpd->attr->size = size; 307 + 308 + return 0; 309 + } 310 + EXPORT_SYMBOL(pci_vpd_truncate); 341 311 342 312 /** 343 313 * pci_block_user_cfg_access - Block userspace PCI config reads/writes
+51 -36
drivers/pci/bus.c
··· 71 71 } 72 72 73 73 /** 74 - * add a single device 74 + * pci_bus_add_device - add a single device 75 75 * @dev: device to add 76 76 * 77 77 * This adds a single pci device to the global ··· 91 91 } 92 92 93 93 /** 94 + * pci_bus_add_child - add a child bus 95 + * @bus: bus to add 96 + * 97 + * This adds sysfs entries for a single bus 98 + */ 99 + int pci_bus_add_child(struct pci_bus *bus) 100 + { 101 + int retval; 102 + 103 + if (bus->bridge) 104 + bus->dev.parent = bus->bridge; 105 + 106 + retval = device_register(&bus->dev); 107 + if (retval) 108 + return retval; 109 + 110 + bus->is_added = 1; 111 + 112 + retval = device_create_file(&bus->dev, &dev_attr_cpuaffinity); 113 + if (retval) 114 + return retval; 115 + 116 + retval = device_create_file(&bus->dev, &dev_attr_cpulistaffinity); 117 + 118 + /* Create legacy_io and legacy_mem files for this bus */ 119 + pci_create_legacy_files(bus); 120 + 121 + return retval; 122 + } 123 + 124 + /** 94 125 * pci_bus_add_devices - insert newly discovered PCI devices 95 126 * @bus: bus to check for new devices 96 127 * ··· 136 105 void pci_bus_add_devices(struct pci_bus *bus) 137 106 { 138 107 struct pci_dev *dev; 139 - struct pci_bus *child_bus; 108 + struct pci_bus *child; 140 109 int retval; 141 110 142 111 list_for_each_entry(dev, &bus->devices, bus_list) { ··· 151 120 list_for_each_entry(dev, &bus->devices, bus_list) { 152 121 BUG_ON(!dev->is_added); 153 122 123 + child = dev->subordinate; 154 124 /* 155 125 * If there is an unattached subordinate bus, attach 156 126 * it and then scan for unattached PCI devices. 157 127 */ 158 - if (dev->subordinate) { 159 - if (list_empty(&dev->subordinate->node)) { 160 - down_write(&pci_bus_sem); 161 - list_add_tail(&dev->subordinate->node, 162 - &dev->bus->children); 163 - up_write(&pci_bus_sem); 164 - } 165 - pci_bus_add_devices(dev->subordinate); 166 - 167 - /* register the bus with sysfs as the parent is now 168 - * properly registered. 
*/ 169 - child_bus = dev->subordinate; 170 - if (child_bus->is_added) 171 - continue; 172 - child_bus->dev.parent = child_bus->bridge; 173 - retval = device_register(&child_bus->dev); 174 - if (retval) 175 - dev_err(&dev->dev, "Error registering pci_bus," 176 - " continuing...\n"); 177 - else { 178 - child_bus->is_added = 1; 179 - retval = device_create_file(&child_bus->dev, 180 - &dev_attr_cpuaffinity); 181 - } 182 - if (retval) 183 - dev_err(&dev->dev, "Error creating cpuaffinity" 184 - " file, continuing...\n"); 185 - 186 - retval = device_create_file(&child_bus->dev, 187 - &dev_attr_cpulistaffinity); 188 - if (retval) 189 - dev_err(&dev->dev, 190 - "Error creating cpulistaffinity" 191 - " file, continuing...\n"); 128 + if (!child) 129 + continue; 130 + if (list_empty(&child->node)) { 131 + down_write(&pci_bus_sem); 132 + list_add_tail(&child->node, &dev->bus->children); 133 + up_write(&pci_bus_sem); 192 134 } 135 + pci_bus_add_devices(child); 136 + 137 + /* 138 + * register the bus with sysfs as the parent is now 139 + * properly registered. 140 + */ 141 + if (child->is_added) 142 + continue; 143 + retval = pci_bus_add_child(child); 144 + if (retval) 145 + dev_err(&dev->dev, "Error adding bus, continuing\n"); 193 146 } 194 147 } 195 148
+3
drivers/pci/hotplug/Makefile
··· 55 55 pciehp_ctrl.o \ 56 56 pciehp_pci.o \ 57 57 pciehp_hpc.o 58 + ifdef CONFIG_ACPI 59 + pciehp-objs += pciehp_acpi.o 60 + endif 58 61 59 62 shpchp-objs := shpchp_core.o \ 60 63 shpchp_ctrl.o \
+69
drivers/pci/hotplug/acpi_pcihp.c
··· 501 501 } 502 502 EXPORT_SYMBOL_GPL(acpi_root_bridge); 503 503 504 + 505 + static int is_ejectable(acpi_handle handle) 506 + { 507 + acpi_status status; 508 + acpi_handle tmp; 509 + unsigned long long removable; 510 + status = acpi_get_handle(handle, "_ADR", &tmp); 511 + if (ACPI_FAILURE(status)) 512 + return 0; 513 + status = acpi_get_handle(handle, "_EJ0", &tmp); 514 + if (ACPI_SUCCESS(status)) 515 + return 1; 516 + status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); 517 + if (ACPI_SUCCESS(status) && removable) 518 + return 1; 519 + return 0; 520 + } 521 + 522 + /** 523 + * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot 524 + * @pbus: the PCI bus of the PCI slot corresponding to 'handle' 525 + * @handle: ACPI handle to check 526 + * 527 + * Return 1 if handle is ejectable PCI slot, 0 otherwise. 528 + */ 529 + int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle) 530 + { 531 + acpi_handle bridge_handle, parent_handle; 532 + 533 + if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) 534 + return 0; 535 + if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) 536 + return 0; 537 + if (bridge_handle != parent_handle) 538 + return 0; 539 + return is_ejectable(handle); 540 + } 541 + EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); 542 + 543 + static acpi_status 544 + check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) 545 + { 546 + int *found = (int *)context; 547 + if (is_ejectable(handle)) { 548 + *found = 1; 549 + return AE_CTRL_TERMINATE; 550 + } 551 + return AE_OK; 552 + } 553 + 554 + /** 555 + * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots 556 + * @pbus - PCI bus to scan 557 + * 558 + * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. 
559 + */ 560 + int acpi_pci_detect_ejectable(struct pci_bus *pbus) 561 + { 562 + acpi_handle handle; 563 + int found = 0; 564 + 565 + if (!(handle = acpi_pci_get_bridge_handle(pbus))) 566 + return 0; 567 + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, 568 + check_hotplug, (void *)&found, NULL); 569 + return found; 570 + } 571 + EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); 572 + 504 573 module_param(debug_acpi, bool, 0644); 505 574 MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");
+1 -1
drivers/pci/hotplug/acpiphp.h
··· 44 44 do { \ 45 45 if (acpiphp_debug) \ 46 46 printk(KERN_DEBUG "%s: " format, \ 47 - MY_NAME , ## arg); \ 47 + MY_NAME , ## arg); \ 48 48 } while (0) 49 49 #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 50 50 #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
+21 -96
drivers/pci/hotplug/acpiphp_glue.c
··· 46 46 #include <linux/kernel.h> 47 47 #include <linux/pci.h> 48 48 #include <linux/pci_hotplug.h> 49 + #include <linux/pci-acpi.h> 49 50 #include <linux/mutex.h> 50 51 51 52 #include "../pci.h" ··· 63 62 static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus); 64 63 static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context); 65 64 66 - 67 - /* 68 - * initialization & terminatation routines 69 - */ 70 - 71 - /** 72 - * is_ejectable - determine if a slot is ejectable 73 - * @handle: handle to acpi namespace 74 - * 75 - * Ejectable slot should satisfy at least these conditions: 76 - * 77 - * 1. has _ADR method 78 - * 2. has _EJ0 method 79 - * 80 - * optionally 81 - * 82 - * 1. has _STA method 83 - * 2. has _PS0 method 84 - * 3. has _PS3 method 85 - * 4. .. 86 - */ 87 - static int is_ejectable(acpi_handle handle) 88 - { 89 - acpi_status status; 90 - acpi_handle tmp; 91 - 92 - status = acpi_get_handle(handle, "_ADR", &tmp); 93 - if (ACPI_FAILURE(status)) { 94 - return 0; 95 - } 96 - 97 - status = acpi_get_handle(handle, "_EJ0", &tmp); 98 - if (ACPI_FAILURE(status)) { 99 - return 0; 100 - } 101 - 102 - return 1; 103 - } 104 - 105 - 106 - /* callback routine to check for the existence of ejectable slots */ 107 - static acpi_status 108 - is_ejectable_slot(acpi_handle handle, u32 lvl, void *context, void **rv) 109 - { 110 - int *count = (int *)context; 111 - 112 - if (is_ejectable(handle)) { 113 - (*count)++; 114 - /* only one ejectable slot is enough */ 115 - return AE_CTRL_TERMINATE; 116 - } else { 117 - return AE_OK; 118 - } 119 - } 120 - 121 65 /* callback routine to check for the existence of a pci dock device */ 122 66 static acpi_status 123 67 is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv) ··· 76 130 return AE_OK; 77 131 } 78 132 } 79 - 80 - 81 - 82 133 83 134 /* 84 135 * the _DCK method can do funny things... 
and sometimes not ··· 103 160 104 161 if (((buses >> 8) & 0xff) != bus->secondary) { 105 162 buses = (buses & 0xff000000) 106 - | ((unsigned int)(bus->primary) << 0) 107 - | ((unsigned int)(bus->secondary) << 8) 108 - | ((unsigned int)(bus->subordinate) << 16); 163 + | ((unsigned int)(bus->primary) << 0) 164 + | ((unsigned int)(bus->secondary) << 8) 165 + | ((unsigned int)(bus->subordinate) << 16); 109 166 pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses); 110 167 } 111 168 return NOTIFY_OK; ··· 127 184 acpi_status status = AE_OK; 128 185 unsigned long long adr, sun; 129 186 int device, function, retval; 187 + struct pci_bus *pbus = bridge->pci_bus; 130 188 131 - status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 132 - 133 - if (ACPI_FAILURE(status)) 189 + if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) 134 190 return AE_OK; 135 191 136 - status = acpi_get_handle(handle, "_EJ0", &tmp); 137 - 138 - if (ACPI_FAILURE(status) && !(is_dock_device(handle))) 139 - return AE_OK; 140 - 192 + acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 141 193 device = (adr >> 16) & 0xffff; 142 194 function = adr & 0xffff; 143 195 ··· 143 205 INIT_LIST_HEAD(&newfunc->sibling); 144 206 newfunc->handle = handle; 145 207 newfunc->function = function; 146 - if (ACPI_SUCCESS(status)) 208 + 209 + if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) 147 210 newfunc->flags = FUNC_HAS_EJ0; 148 211 149 212 if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp))) ··· 195 256 bridge->nr_slots++; 196 257 197 258 dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n", 198 - slot->sun, pci_domain_nr(bridge->pci_bus), 199 - bridge->pci_bus->number, slot->device); 259 + slot->sun, pci_domain_nr(pbus), pbus->number, device); 200 260 retval = acpiphp_register_hotplug_slot(slot); 201 261 if (retval) { 202 262 if (retval == -EBUSY) ··· 212 274 list_add_tail(&newfunc->sibling, &slot->funcs); 213 275 214 276 /* associate corresponding pci_dev */ 215 - 
newfunc->pci_dev = pci_get_slot(bridge->pci_bus, 216 - PCI_DEVFN(device, function)); 277 + newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function)); 217 278 if (newfunc->pci_dev) { 218 279 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); 219 280 } ··· 261 324 262 325 263 326 /* see if it's worth looking at this bridge */ 264 - static int detect_ejectable_slots(acpi_handle *bridge_handle) 327 + static int detect_ejectable_slots(struct pci_bus *pbus) 265 328 { 266 - acpi_status status; 267 - int count; 268 - 269 - count = 0; 270 - 271 - /* only check slots defined directly below bridge object */ 272 - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1, 273 - is_ejectable_slot, (void *)&count, NULL); 274 - 275 - /* 276 - * we also need to add this bridge if there is a dock bridge or 277 - * other pci device on a dock station (removable) 278 - */ 279 - if (!count) 280 - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, 281 - (u32)1, is_pci_dock_device, (void *)&count, 282 - NULL); 283 - 284 - return count; 329 + int found = acpi_pci_detect_ejectable(pbus); 330 + if (!found) { 331 + acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus); 332 + acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1, 333 + is_pci_dock_device, (void *)&found, NULL); 334 + } 335 + return found; 285 336 } 286 337 287 338 ··· 479 554 goto out; 480 555 481 556 /* check if this bridge has ejectable slots */ 482 - if ((detect_ejectable_slots(handle) > 0)) { 557 + if ((detect_ejectable_slots(dev->subordinate) > 0)) { 483 558 dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); 484 559 add_p2p_bridge(handle, dev); 485 560 } ··· 540 615 } 541 616 542 617 /* check if this bridge has ejectable slots */ 543 - if (detect_ejectable_slots(handle) > 0) { 618 + if (detect_ejectable_slots(pci_bus) > 0) { 544 619 dbg("found PCI host-bus bridge with hot-pluggable slots\n"); 545 620 add_host_bridge(handle, pci_bus); 546 621 }
+1 -1
drivers/pci/hotplug/acpiphp_ibm.c
··· 271 271 dbg("%s: generationg bus event\n", __func__); 272 272 acpi_bus_generate_proc_event(note->device, note->event, detail); 273 273 acpi_bus_generate_netlink_event(note->device->pnp.device_class, 274 - note->device->dev.bus_id, 274 + dev_name(&note->device->dev), 275 275 note->event, detail); 276 276 } else 277 277 note->event = event;
+3 -3
drivers/pci/hotplug/cpqphp_ctrl.c
··· 1954 1954 return ; 1955 1955 } 1956 1956 1957 - if (func != NULL && ctrl != NULL) { 1957 + if (ctrl != NULL) { 1958 1958 if (cpqhp_process_SI(ctrl, func) != 0) { 1959 1959 amber_LED_on(ctrl, hp_slot); 1960 1960 green_LED_off(ctrl, hp_slot); ··· 2604 2604 for (cloop = 0; cloop < 4; cloop++) { 2605 2605 if (irqs.valid_INT & (0x01 << cloop)) { 2606 2606 rc = cpqhp_set_irq(func->bus, func->device, 2607 - 0x0A + cloop, irqs.interrupt[cloop]); 2607 + cloop + 1, irqs.interrupt[cloop]); 2608 2608 if (rc) 2609 2609 goto free_and_out; 2610 2610 } ··· 2945 2945 } 2946 2946 2947 2947 if (!behind_bridge) { 2948 - rc = cpqhp_set_irq(func->bus, func->device, temp_byte + 0x09, IRQ); 2948 + rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ); 2949 2949 if (rc) 2950 2950 return 1; 2951 2951 } else {
+1 -1
drivers/pci/hotplug/cpqphp_pci.c
··· 171 171 fakebus->number = bus_num; 172 172 dbg("%s: dev %d, bus %d, pin %d, num %d\n", 173 173 __func__, dev_num, bus_num, int_pin, irq_num); 174 - rc = pcibios_set_irq_routing(fakedev, int_pin - 0x0a, irq_num); 174 + rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num); 175 175 kfree(fakedev); 176 176 kfree(fakebus); 177 177 dbg("%s: rc %d\n", __func__, rc);
+1
drivers/pci/hotplug/fakephp.c
··· 324 324 325 325 if (test_and_set_bit(0, &dslot->removed)) { 326 326 dbg("Slot already scheduled for removal\n"); 327 + pci_dev_put(dev); 327 328 return -ENODEV; 328 329 } 329 330
+14 -1
drivers/pci/hotplug/pciehp.h
··· 220 220 #include <acpi/actypes.h> 221 221 #include <linux/pci-acpi.h> 222 222 223 + extern void __init pciehp_acpi_slot_detection_init(void); 224 + extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev); 225 + 226 + static inline void pciehp_firmware_init(void) 227 + { 228 + pciehp_acpi_slot_detection_init(); 229 + } 230 + 223 231 static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev) 224 232 { 233 + int retval; 225 234 u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | 226 235 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 227 - return acpi_get_hp_hw_control_from_firmware(dev, flags); 236 + retval = acpi_get_hp_hw_control_from_firmware(dev, flags); 237 + if (retval) 238 + return retval; 239 + return pciehp_acpi_slot_detection_check(dev); 228 240 } 229 241 230 242 static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, ··· 247 235 return 0; 248 236 } 249 237 #else 238 + #define pciehp_firmware_init() do {} while (0) 250 239 #define pciehp_get_hp_hw_control_from_firmware(dev) 0 251 240 #define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV) 252 241 #endif /* CONFIG_ACPI */
+141
drivers/pci/hotplug/pciehp_acpi.c
··· 1 + /* 2 + * ACPI related functions for PCI Express Hot Plug driver. 3 + * 4 + * Copyright (C) 2008 Kenji Kaneshige 5 + * Copyright (C) 2008 Fujitsu Limited. 6 + * 7 + * All rights reserved. 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License, or (at 12 + * your option) any later version. 13 + * 14 + * This program is distributed in the hope that it will be useful, but 15 + * WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 17 + * NON INFRINGEMENT. See the GNU General Public License for more 18 + * details. 19 + * 20 + * You should have received a copy of the GNU General Public License 21 + * along with this program; if not, write to the Free Software 22 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 + * 24 + */ 25 + 26 + #include <linux/acpi.h> 27 + #include <linux/pci.h> 28 + #include <linux/pci_hotplug.h> 29 + #include "pciehp.h" 30 + 31 + #define PCIEHP_DETECT_PCIE (0) 32 + #define PCIEHP_DETECT_ACPI (1) 33 + #define PCIEHP_DETECT_AUTO (2) 34 + #define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO 35 + 36 + static int slot_detection_mode; 37 + static char *pciehp_detect_mode; 38 + module_param(pciehp_detect_mode, charp, 0444); 39 + MODULE_PARM_DESC(pciehp_detect_mode, 40 + "Slot detection mode: pcie, acpi, auto\n" 41 + " pcie - Use PCIe based slot detection\n" 42 + " acpi - Use ACPI for slot detection\n" 43 + " auto(default) - Auto select mode. Use acpi option if duplicate\n" 44 + " slot ids are found. 
Otherwise, use pcie option\n"); 45 + 46 + int pciehp_acpi_slot_detection_check(struct pci_dev *dev) 47 + { 48 + if (slot_detection_mode != PCIEHP_DETECT_ACPI) 49 + return 0; 50 + if (acpi_pci_detect_ejectable(dev->subordinate)) 51 + return 0; 52 + return -ENODEV; 53 + } 54 + 55 + static int __init parse_detect_mode(void) 56 + { 57 + if (!pciehp_detect_mode) 58 + return PCIEHP_DETECT_DEFAULT; 59 + if (!strcmp(pciehp_detect_mode, "pcie")) 60 + return PCIEHP_DETECT_PCIE; 61 + if (!strcmp(pciehp_detect_mode, "acpi")) 62 + return PCIEHP_DETECT_ACPI; 63 + if (!strcmp(pciehp_detect_mode, "auto")) 64 + return PCIEHP_DETECT_AUTO; 65 + warn("bad specifier '%s' for pciehp_detect_mode. Use default\n", 66 + pciehp_detect_mode); 67 + return PCIEHP_DETECT_DEFAULT; 68 + } 69 + 70 + static struct pcie_port_service_id __initdata port_pci_ids[] = { 71 + { 72 + .vendor = PCI_ANY_ID, 73 + .device = PCI_ANY_ID, 74 + .port_type = PCIE_ANY_PORT, 75 + .service_type = PCIE_PORT_SERVICE_HP, 76 + .driver_data = 0, 77 + }, { /* end: all zeroes */ } 78 + }; 79 + 80 + static int __initdata dup_slot_id; 81 + static int __initdata acpi_slot_detected; 82 + static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); 83 + 84 + /* Dummy driver for dumplicate name detection */ 85 + static int __init dummy_probe(struct pcie_device *dev, 86 + const struct pcie_port_service_id *id) 87 + { 88 + int pos; 89 + u32 slot_cap; 90 + struct slot *slot, *tmp; 91 + struct pci_dev *pdev = dev->port; 92 + struct pci_bus *pbus = pdev->subordinate; 93 + if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL))) 94 + return -ENOMEM; 95 + /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ 96 + if (pciehp_get_hp_hw_control_from_firmware(pdev)) 97 + return -ENODEV; 98 + if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP))) 99 + return -ENODEV; 100 + pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); 101 + slot->number = slot_cap >> 19; 102 + list_for_each_entry(tmp, &dummy_slots, slot_list) { 
103 + if (tmp->number == slot->number) 104 + dup_slot_id++; 105 + } 106 + list_add_tail(&slot->slot_list, &dummy_slots); 107 + if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus)) 108 + acpi_slot_detected = 1; 109 + return -ENODEV; /* dummy driver always returns error */ 110 + } 111 + 112 + static struct pcie_port_service_driver __initdata dummy_driver = { 113 + .name = "pciehp_dummy", 114 + .id_table = port_pci_ids, 115 + .probe = dummy_probe, 116 + }; 117 + 118 + static int __init select_detection_mode(void) 119 + { 120 + struct slot *slot, *tmp; 121 + pcie_port_service_register(&dummy_driver); 122 + pcie_port_service_unregister(&dummy_driver); 123 + list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) { 124 + list_del(&slot->slot_list); 125 + kfree(slot); 126 + } 127 + if (acpi_slot_detected && dup_slot_id) 128 + return PCIEHP_DETECT_ACPI; 129 + return PCIEHP_DETECT_PCIE; 130 + } 131 + 132 + void __init pciehp_acpi_slot_detection_init(void) 133 + { 134 + slot_detection_mode = parse_detect_mode(); 135 + if (slot_detection_mode != PCIEHP_DETECT_AUTO) 136 + goto out; 137 + slot_detection_mode = select_detection_mode(); 138 + out: 139 + if (slot_detection_mode == PCIEHP_DETECT_ACPI) 140 + info("Using ACPI for slot detection.\n"); 141 + }
+1
drivers/pci/hotplug/pciehp_core.c
··· 522 522 { 523 523 int retval = 0; 524 524 525 + pciehp_firmware_init(); 525 526 retval = pcie_port_service_register(&hpdriver_portdrv); 526 527 dbg("pcie_port_service_register = %d\n", retval); 527 528 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+12 -14
drivers/pci/hotplug/pciehp_ctrl.c
··· 178 178 "Issue of Slot Power Off command failed\n"); 179 179 return; 180 180 } 181 + /* 182 + * After turning power off, we must wait for at least 1 second 183 + * before taking any action that relies on power having been 184 + * removed from the slot/adapter. 185 + */ 186 + msleep(1000); 181 187 } 182 - 183 - /* 184 - * After turning power off, we must wait for at least 1 second 185 - * before taking any action that relies on power having been 186 - * removed from the slot/adapter. 187 - */ 188 - msleep(1000); 189 188 190 189 if (PWR_LED(ctrl)) 191 190 pslot->hpc_ops->green_led_off(pslot); ··· 285 286 "Issue of Slot Disable command failed\n"); 286 287 return retval; 287 288 } 289 + /* 290 + * After turning power off, we must wait for at least 1 second 291 + * before taking any action that relies on power having been 292 + * removed from the slot/adapter. 293 + */ 294 + msleep(1000); 288 295 } 289 - 290 - /* 291 - * After turning power off, we must wait for at least 1 second 292 - * before taking any action that relies on power having been 293 - * removed from the slot/adapter. 294 - */ 295 - msleep(1000); 296 296 297 297 if (PWR_LED(ctrl)) 298 298 /* turn off Green LED */
+99 -230
drivers/pci/hotplug/pciehp_hpc.c
··· 42 42 43 43 static atomic_t pciehp_num_controllers = ATOMIC_INIT(0); 44 44 45 - struct ctrl_reg { 46 - u8 cap_id; 47 - u8 nxt_ptr; 48 - u16 cap_reg; 49 - u32 dev_cap; 50 - u16 dev_ctrl; 51 - u16 dev_status; 52 - u32 lnk_cap; 53 - u16 lnk_ctrl; 54 - u16 lnk_status; 55 - u32 slot_cap; 56 - u16 slot_ctrl; 57 - u16 slot_status; 58 - u16 root_ctrl; 59 - u16 rsvp; 60 - u32 root_status; 61 - } __attribute__ ((packed)); 62 - 63 - /* offsets to the controller registers based on the above structure layout */ 64 - enum ctrl_offsets { 65 - PCIECAPID = offsetof(struct ctrl_reg, cap_id), 66 - NXTCAPPTR = offsetof(struct ctrl_reg, nxt_ptr), 67 - CAPREG = offsetof(struct ctrl_reg, cap_reg), 68 - DEVCAP = offsetof(struct ctrl_reg, dev_cap), 69 - DEVCTRL = offsetof(struct ctrl_reg, dev_ctrl), 70 - DEVSTATUS = offsetof(struct ctrl_reg, dev_status), 71 - LNKCAP = offsetof(struct ctrl_reg, lnk_cap), 72 - LNKCTRL = offsetof(struct ctrl_reg, lnk_ctrl), 73 - LNKSTATUS = offsetof(struct ctrl_reg, lnk_status), 74 - SLOTCAP = offsetof(struct ctrl_reg, slot_cap), 75 - SLOTCTRL = offsetof(struct ctrl_reg, slot_ctrl), 76 - SLOTSTATUS = offsetof(struct ctrl_reg, slot_status), 77 - ROOTCTRL = offsetof(struct ctrl_reg, root_ctrl), 78 - ROOTSTATUS = offsetof(struct ctrl_reg, root_status), 79 - }; 80 - 81 45 static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 82 46 { 83 47 struct pci_dev *dev = ctrl->pci_dev; ··· 66 102 return pci_write_config_dword(dev, ctrl->cap_base + reg, value); 67 103 } 68 104 69 - /* Field definitions in PCI Express Capabilities Register */ 70 - #define CAP_VER 0x000F 71 - #define DEV_PORT_TYPE 0x00F0 72 - #define SLOT_IMPL 0x0100 73 - #define MSG_NUM 0x3E00 74 - 75 - /* Device or Port Type */ 76 - #define NAT_ENDPT 0x00 77 - #define LEG_ENDPT 0x01 78 - #define ROOT_PORT 0x04 79 - #define UP_STREAM 0x05 80 - #define DN_STREAM 0x06 81 - #define PCIE_PCI_BRDG 0x07 82 - #define PCI_PCIE_BRDG 0x10 83 - 84 - /* Field definitions in Device Capabilities 
Register */ 85 - #define DATTN_BUTTN_PRSN 0x1000 86 - #define DATTN_LED_PRSN 0x2000 87 - #define DPWR_LED_PRSN 0x4000 88 - 89 - /* Field definitions in Link Capabilities Register */ 90 - #define MAX_LNK_SPEED 0x000F 91 - #define MAX_LNK_WIDTH 0x03F0 92 - #define LINK_ACTIVE_REPORTING 0x00100000 93 - 94 - /* Link Width Encoding */ 95 - #define LNK_X1 0x01 96 - #define LNK_X2 0x02 97 - #define LNK_X4 0x04 98 - #define LNK_X8 0x08 99 - #define LNK_X12 0x0C 100 - #define LNK_X16 0x10 101 - #define LNK_X32 0x20 102 - 103 - /*Field definitions of Link Status Register */ 104 - #define LNK_SPEED 0x000F 105 - #define NEG_LINK_WD 0x03F0 106 - #define LNK_TRN_ERR 0x0400 107 - #define LNK_TRN 0x0800 108 - #define SLOT_CLK_CONF 0x1000 109 - #define LINK_ACTIVE 0x2000 110 - 111 - /* Field definitions in Slot Capabilities Register */ 112 - #define ATTN_BUTTN_PRSN 0x00000001 113 - #define PWR_CTRL_PRSN 0x00000002 114 - #define MRL_SENS_PRSN 0x00000004 115 - #define ATTN_LED_PRSN 0x00000008 116 - #define PWR_LED_PRSN 0x00000010 117 - #define HP_SUPR_RM_SUP 0x00000020 118 - #define HP_CAP 0x00000040 119 - #define SLOT_PWR_VALUE 0x000003F8 120 - #define SLOT_PWR_LIMIT 0x00000C00 121 - #define PSN 0xFFF80000 /* PSN: Physical Slot Number */ 122 - 123 - /* Field definitions in Slot Control Register */ 124 - #define ATTN_BUTTN_ENABLE 0x0001 125 - #define PWR_FAULT_DETECT_ENABLE 0x0002 126 - #define MRL_DETECT_ENABLE 0x0004 127 - #define PRSN_DETECT_ENABLE 0x0008 128 - #define CMD_CMPL_INTR_ENABLE 0x0010 129 - #define HP_INTR_ENABLE 0x0020 130 - #define ATTN_LED_CTRL 0x00C0 131 - #define PWR_LED_CTRL 0x0300 132 - #define PWR_CTRL 0x0400 133 - #define EMI_CTRL 0x0800 134 - 135 - /* Attention indicator and Power indicator states */ 136 - #define LED_ON 0x01 137 - #define LED_BLINK 0x10 138 - #define LED_OFF 0x11 139 - 140 105 /* Power Control Command */ 141 106 #define POWER_ON 0 142 - #define POWER_OFF 0x0400 143 - 144 - /* EMI Status defines */ 145 - #define EMI_DISENGAGED 0 146 - #define 
EMI_ENGAGED 1 147 - 148 - /* Field definitions in Slot Status Register */ 149 - #define ATTN_BUTTN_PRESSED 0x0001 150 - #define PWR_FAULT_DETECTED 0x0002 151 - #define MRL_SENS_CHANGED 0x0004 152 - #define PRSN_DETECT_CHANGED 0x0008 153 - #define CMD_COMPLETED 0x0010 154 - #define MRL_STATE 0x0020 155 - #define PRSN_STATE 0x0040 156 - #define EMI_STATE 0x0080 157 - #define EMI_STATUS_BIT 7 107 + #define POWER_OFF PCI_EXP_SLTCTL_PCC 158 108 159 109 static irqreturn_t pcie_isr(int irq, void *dev_id); 160 110 static void start_int_poll_timer(struct controller *ctrl, int sec); ··· 131 253 static int pcie_poll_cmd(struct controller *ctrl) 132 254 { 133 255 u16 slot_status; 134 - int timeout = 1000; 256 + int err, timeout = 1000; 135 257 136 - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { 137 - if (slot_status & CMD_COMPLETED) { 138 - pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); 139 - return 1; 140 - } 258 + err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 259 + if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { 260 + pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); 261 + return 1; 141 262 } 142 263 while (timeout > 0) { 143 264 msleep(10); 144 265 timeout -= 10; 145 - if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { 146 - if (slot_status & CMD_COMPLETED) { 147 - pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED); 148 - return 1; 149 - } 266 + err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 267 + if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) { 268 + pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); 269 + return 1; 150 270 } 151 271 } 152 272 return 0; /* timeout */ ··· 178 302 179 303 mutex_lock(&ctrl->ctrl_lock); 180 304 181 - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 305 + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 182 306 if (retval) { 183 307 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", 184 308 __func__); 185 309 goto out; 186 310 } 187 311 188 - if (slot_status & CMD_COMPLETED) { 312 + if 
(slot_status & PCI_EXP_SLTSTA_CC) { 189 313 if (!ctrl->no_cmd_complete) { 190 314 /* 191 315 * After 1 sec and CMD_COMPLETED still not set, just ··· 208 332 } 209 333 } 210 334 211 - retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 335 + retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); 212 336 if (retval) { 213 337 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); 214 338 goto out; ··· 218 342 slot_ctrl |= (cmd & mask); 219 343 ctrl->cmd_busy = 1; 220 344 smp_mb(); 221 - retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); 345 + retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl); 222 346 if (retval) 223 347 ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n"); 224 348 ··· 232 356 * completed interrupt is not enabled, we need to poll 233 357 * command completed event. 234 358 */ 235 - if (!(slot_ctrl & HP_INTR_ENABLE) || 236 - !(slot_ctrl & CMD_CMPL_INTR_ENABLE)) 359 + if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) || 360 + !(slot_ctrl & PCI_EXP_SLTCTL_CCIE)) 237 361 poll = 1; 238 362 pcie_wait_cmd(ctrl, poll); 239 363 } ··· 246 370 { 247 371 u16 link_status; 248 372 249 - if (pciehp_readw(ctrl, LNKSTATUS, &link_status)) 373 + if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &link_status)) 250 374 return 0; 251 - return !!(link_status & LINK_ACTIVE); 375 + return !!(link_status & PCI_EXP_LNKSTA_DLLLA); 252 376 } 253 377 254 378 static void pcie_wait_link_active(struct controller *ctrl) ··· 288 412 } else 289 413 msleep(1000); 290 414 291 - retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 415 + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 292 416 if (retval) { 293 417 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); 294 418 return retval; 295 419 } 296 420 297 421 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); 298 - if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || 299 - !(lnk_status & NEG_LINK_WD)) { 422 + if ((lnk_status & PCI_EXP_LNKSTA_LT) || 423 + !(lnk_status & PCI_EXP_LNKSTA_NLW)) { 300 424 
ctrl_err(ctrl, "Link Training Error occurs \n"); 301 425 retval = -1; 302 426 return retval; ··· 312 436 u8 atten_led_state; 313 437 int retval = 0; 314 438 315 - retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 439 + retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); 316 440 if (retval) { 317 441 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); 318 442 return retval; 319 443 } 320 444 321 445 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", 322 - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); 446 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); 323 447 324 - atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; 448 + atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; 325 449 326 450 switch (atten_led_state) { 327 451 case 0: ··· 351 475 u8 pwr_state; 352 476 int retval = 0; 353 477 354 - retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); 478 + retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl); 355 479 if (retval) { 356 480 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); 357 481 return retval; 358 482 } 359 483 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", 360 - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); 484 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); 361 485 362 - pwr_state = (slot_ctrl & PWR_CTRL) >> 10; 486 + pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; 363 487 364 488 switch (pwr_state) { 365 489 case 0: ··· 380 504 { 381 505 struct controller *ctrl = slot->ctrl; 382 506 u16 slot_status; 383 - int retval = 0; 507 + int retval; 384 508 385 - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 509 + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 386 510 if (retval) { 387 511 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", 388 512 __func__); 389 513 return retval; 390 514 } 391 - 392 - *status = (((slot_status & MRL_STATE) >> 5) == 0) ? 
0 : 1; 393 - 515 + *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS); 394 516 return 0; 395 517 } 396 518 ··· 396 522 { 397 523 struct controller *ctrl = slot->ctrl; 398 524 u16 slot_status; 399 - u8 card_state; 400 - int retval = 0; 525 + int retval; 401 526 402 - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 527 + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 403 528 if (retval) { 404 529 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", 405 530 __func__); 406 531 return retval; 407 532 } 408 - card_state = (u8)((slot_status & PRSN_STATE) >> 6); 409 - *status = (card_state == 1) ? 1 : 0; 410 - 533 + *status = !!(slot_status & PCI_EXP_SLTSTA_PDS); 411 534 return 0; 412 535 } 413 536 ··· 412 541 { 413 542 struct controller *ctrl = slot->ctrl; 414 543 u16 slot_status; 415 - u8 pwr_fault; 416 - int retval = 0; 544 + int retval; 417 545 418 - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 546 + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 419 547 if (retval) { 420 548 ctrl_err(ctrl, "Cannot check for power fault\n"); 421 549 return retval; 422 550 } 423 - pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); 424 - 425 - return pwr_fault; 551 + return !!(slot_status & PCI_EXP_SLTSTA_PFD); 426 552 } 427 553 428 554 static int hpc_get_emi_status(struct slot *slot, u8 *status) 429 555 { 430 556 struct controller *ctrl = slot->ctrl; 431 557 u16 slot_status; 432 - int retval = 0; 558 + int retval; 433 559 434 - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 560 + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 435 561 if (retval) { 436 562 ctrl_err(ctrl, "Cannot check EMI status\n"); 437 563 return retval; 438 564 } 439 - *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; 440 - 565 + *status = !!(slot_status & PCI_EXP_SLTSTA_EIS); 441 566 return retval; 442 567 } 443 568 ··· 443 576 u16 cmd_mask; 444 577 int rc; 445 578 446 - slot_cmd = EMI_CTRL; 447 - cmd_mask = EMI_CTRL; 579 + slot_cmd = 
PCI_EXP_SLTCTL_EIC; 580 + cmd_mask = PCI_EXP_SLTCTL_EIC; 448 581 rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask); 449 582 slot->last_emi_toggle = get_seconds(); 450 583 ··· 458 591 u16 cmd_mask; 459 592 int rc; 460 593 461 - cmd_mask = ATTN_LED_CTRL; 594 + cmd_mask = PCI_EXP_SLTCTL_AIC; 462 595 switch (value) { 463 596 case 0 : /* turn off */ 464 597 slot_cmd = 0x00C0; ··· 474 607 } 475 608 rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 476 609 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 477 - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 610 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 478 611 479 612 return rc; 480 613 } ··· 486 619 u16 cmd_mask; 487 620 488 621 slot_cmd = 0x0100; 489 - cmd_mask = PWR_LED_CTRL; 622 + cmd_mask = PCI_EXP_SLTCTL_PIC; 490 623 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 491 624 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 492 - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 625 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 493 626 } 494 627 495 628 static void hpc_set_green_led_off(struct slot *slot) ··· 499 632 u16 cmd_mask; 500 633 501 634 slot_cmd = 0x0300; 502 - cmd_mask = PWR_LED_CTRL; 635 + cmd_mask = PCI_EXP_SLTCTL_PIC; 503 636 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 504 637 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 505 - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 638 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 506 639 } 507 640 508 641 static void hpc_set_green_led_blink(struct slot *slot) ··· 512 645 u16 cmd_mask; 513 646 514 647 slot_cmd = 0x0200; 515 - cmd_mask = PWR_LED_CTRL; 648 + cmd_mask = PCI_EXP_SLTCTL_PIC; 516 649 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 517 650 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 518 - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 651 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 519 652 } 520 653 521 654 static int hpc_power_on_slot(struct slot * slot) ··· 529 662 ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", 
__func__, slot->hp_slot); 530 663 531 664 /* Clear sticky power-fault bit from previous power failures */ 532 - retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); 665 + retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status); 533 666 if (retval) { 534 667 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", 535 668 __func__); 536 669 return retval; 537 670 } 538 - slot_status &= PWR_FAULT_DETECTED; 671 + slot_status &= PCI_EXP_SLTSTA_PFD; 539 672 if (slot_status) { 540 - retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); 673 + retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status); 541 674 if (retval) { 542 675 ctrl_err(ctrl, 543 676 "%s: Cannot write to SLOTSTATUS register\n", ··· 547 680 } 548 681 549 682 slot_cmd = POWER_ON; 550 - cmd_mask = PWR_CTRL; 683 + cmd_mask = PCI_EXP_SLTCTL_PCC; 551 684 /* Enable detection that we turned off at slot power-off time */ 552 685 if (!pciehp_poll_mode) { 553 - slot_cmd |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | 554 - PRSN_DETECT_ENABLE); 555 - cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | 556 - PRSN_DETECT_ENABLE); 686 + slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 687 + PCI_EXP_SLTCTL_PDCE); 688 + cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 689 + PCI_EXP_SLTCTL_PDCE); 557 690 } 558 691 559 692 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); ··· 563 696 return -1; 564 697 } 565 698 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 566 - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 699 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 567 700 568 701 return retval; 569 702 } ··· 620 753 changed = pcie_mask_bad_dllp(ctrl); 621 754 622 755 slot_cmd = POWER_OFF; 623 - cmd_mask = PWR_CTRL; 756 + cmd_mask = PCI_EXP_SLTCTL_PCC; 624 757 /* 625 758 * If we get MRL or presence detect interrupts now, the isr 626 759 * will notice the sticky power-fault bit too and issue power ··· 629 762 * till the slot is powered on again. 
630 763 */ 631 764 if (!pciehp_poll_mode) { 632 - slot_cmd &= ~(PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | 633 - PRSN_DETECT_ENABLE); 634 - cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | 635 - PRSN_DETECT_ENABLE); 765 + slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 766 + PCI_EXP_SLTCTL_PDCE); 767 + cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE | 768 + PCI_EXP_SLTCTL_PDCE); 636 769 } 637 770 638 771 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); ··· 642 775 goto out; 643 776 } 644 777 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 645 - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); 778 + __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 646 779 out: 647 780 if (changed) 648 781 pcie_unmask_bad_dllp(ctrl); ··· 663 796 */ 664 797 intr_loc = 0; 665 798 do { 666 - if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { 799 + if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) { 667 800 ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", 668 801 __func__); 669 802 return IRQ_NONE; 670 803 } 671 804 672 - detected &= (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED | 673 - MRL_SENS_CHANGED | PRSN_DETECT_CHANGED | 674 - CMD_COMPLETED); 805 + detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | 806 + PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | 807 + PCI_EXP_SLTSTA_CC); 675 808 intr_loc |= detected; 676 809 if (!intr_loc) 677 810 return IRQ_NONE; 678 - if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { 811 + if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) { 679 812 ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", 680 813 __func__); 681 814 return IRQ_NONE; ··· 685 818 ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); 686 819 687 820 /* Check Command Complete Interrupt Pending */ 688 - if (intr_loc & CMD_COMPLETED) { 821 + if (intr_loc & PCI_EXP_SLTSTA_CC) { 689 822 ctrl->cmd_busy = 0; 690 823 smp_mb(); 691 824 wake_up(&ctrl->queue); 692 825 } 693 826 694 - if (!(intr_loc & ~CMD_COMPLETED)) 827 + if 
(!(intr_loc & ~PCI_EXP_SLTSTA_CC)) 695 828 return IRQ_HANDLED; 696 829 697 830 p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); 698 831 699 832 /* Check MRL Sensor Changed */ 700 - if (intr_loc & MRL_SENS_CHANGED) 833 + if (intr_loc & PCI_EXP_SLTSTA_MRLSC) 701 834 pciehp_handle_switch_change(p_slot); 702 835 703 836 /* Check Attention Button Pressed */ 704 - if (intr_loc & ATTN_BUTTN_PRESSED) 837 + if (intr_loc & PCI_EXP_SLTSTA_ABP) 705 838 pciehp_handle_attention_button(p_slot); 706 839 707 840 /* Check Presence Detect Changed */ 708 - if (intr_loc & PRSN_DETECT_CHANGED) 841 + if (intr_loc & PCI_EXP_SLTSTA_PDC) 709 842 pciehp_handle_presence_change(p_slot); 710 843 711 844 /* Check Power Fault Detected */ 712 - if (intr_loc & PWR_FAULT_DETECTED) 845 + if (intr_loc & PCI_EXP_SLTSTA_PFD) 713 846 pciehp_handle_power_fault(p_slot); 714 847 715 848 return IRQ_HANDLED; ··· 722 855 u32 lnk_cap; 723 856 int retval = 0; 724 857 725 - retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 858 + retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); 726 859 if (retval) { 727 860 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); 728 861 return retval; ··· 751 884 u32 lnk_cap; 752 885 int retval = 0; 753 886 754 - retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); 887 + retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); 755 888 if (retval) { 756 889 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); 757 890 return retval; 758 891 } 759 892 760 - switch ((lnk_cap & 0x03F0) >> 4){ 893 + switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){ 761 894 case 0: 762 895 lnk_wdth = PCIE_LNK_WIDTH_RESRV; 763 896 break; ··· 800 933 int retval = 0; 801 934 u16 lnk_status; 802 935 803 - retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 936 + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 804 937 if (retval) { 805 938 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", 806 939 __func__); 807 940 return retval; 808 941 } 809 942 810 - switch (lnk_status & 
0x0F) { 943 + switch (lnk_status & PCI_EXP_LNKSTA_CLS) { 811 944 case 1: 812 945 lnk_speed = PCIE_2PT5GB; 813 946 break; ··· 830 963 int retval = 0; 831 964 u16 lnk_status; 832 965 833 - retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 966 + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 834 967 if (retval) { 835 968 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", 836 969 __func__); 837 970 return retval; 838 971 } 839 972 840 - switch ((lnk_status & 0x03F0) >> 4){ 973 + switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){ 841 974 case 0: 842 975 lnk_wdth = PCIE_LNK_WIDTH_RESRV; 843 976 break; ··· 903 1036 { 904 1037 u16 cmd, mask; 905 1038 906 - cmd = PRSN_DETECT_ENABLE; 1039 + cmd = PCI_EXP_SLTCTL_PDCE; 907 1040 if (ATTN_BUTTN(ctrl)) 908 - cmd |= ATTN_BUTTN_ENABLE; 1041 + cmd |= PCI_EXP_SLTCTL_ABPE; 909 1042 if (POWER_CTRL(ctrl)) 910 - cmd |= PWR_FAULT_DETECT_ENABLE; 1043 + cmd |= PCI_EXP_SLTCTL_PFDE; 911 1044 if (MRL_SENS(ctrl)) 912 - cmd |= MRL_DETECT_ENABLE; 1045 + cmd |= PCI_EXP_SLTCTL_MRLSCE; 913 1046 if (!pciehp_poll_mode) 914 - cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1047 + cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE; 915 1048 916 - mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | 917 - PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1049 + mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | 1050 + PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | 1051 + PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); 918 1052 919 1053 if (pcie_write_cmd(ctrl, cmd, mask)) { 920 1054 ctrl_err(ctrl, "Cannot enable software notification\n"); ··· 927 1059 static void pcie_disable_notification(struct controller *ctrl) 928 1060 { 929 1061 u16 mask; 930 - mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | 931 - PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; 1062 + mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | 1063 + PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | 1064 + 
PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); 932 1065 if (pcie_write_cmd(ctrl, 0, mask)) 933 1066 ctrl_warn(ctrl, "Cannot disable software notification\n"); 934 1067 } ··· 1026 1157 EMI(ctrl) ? "yes" : "no"); 1027 1158 ctrl_info(ctrl, " Command Completed : %3s\n", 1028 1159 NO_CMD_CMPL(ctrl) ? "no" : "yes"); 1029 - pciehp_readw(ctrl, SLOTSTATUS, &reg16); 1160 + pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16); 1030 1161 ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); 1031 - pciehp_readw(ctrl, SLOTCTRL, &reg16); 1162 + pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16); 1032 1163 ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); 1033 1164 } 1034 1165 ··· 1052 1183 ctrl_err(ctrl, "Cannot find PCI Express capability\n"); 1053 1184 goto abort_ctrl; 1054 1185 } 1055 - if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { 1186 + if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { 1056 1187 ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); 1057 1188 goto abort_ctrl; 1058 1189 } ··· 1077 1208 ctrl->no_cmd_complete = 1; 1078 1209 1079 1210 /* Check if Data Link Layer Link Active Reporting is implemented */ 1080 - if (pciehp_readl(ctrl, LNKCAP, &link_cap)) { 1211 + if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) { 1081 1212 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); 1082 1213 goto abort_ctrl; 1083 1214 } 1084 - if (link_cap & LINK_ACTIVE_REPORTING) { 1215 + if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { 1085 1216 ctrl_dbg(ctrl, "Link Active Reporting supported\n"); 1086 1217 ctrl->link_active_reporting = 1; 1087 1218 } 1088 1219 1089 1220 /* Clear all remaining event bits in Slot Status register */ 1090 - if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) 1221 + if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) 1091 1222 goto abort_ctrl; 1092 1223 1093 1224 /* Disable sotfware notification */
+1 -1
drivers/pci/irq.c
··· 15 15 16 16 dev_printk(KERN_ERR, &pdev->dev, 17 17 "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", 18 - parent->dev.bus_id, parent->vendor, parent->device); 18 + dev_name(&parent->dev), parent->vendor, parent->device); 19 19 dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason); 20 20 dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n"); 21 21 WARN_ON(1);
+12 -21
drivers/pci/msi.c
··· 776 776 pci_msi_enable = 0; 777 777 } 778 778 779 + /** 780 + * pci_msi_enabled - is MSI enabled? 781 + * 782 + * Returns true if MSI has not been disabled by the command-line option 783 + * pci=nomsi. 784 + **/ 785 + int pci_msi_enabled(void) 786 + { 787 + return pci_msi_enable; 788 + } 789 + EXPORT_SYMBOL(pci_msi_enabled); 790 + 779 791 void pci_msi_init_pci_dev(struct pci_dev *dev) 780 792 { 781 793 INIT_LIST_HEAD(&dev->msi_list); 782 - } 783 - 784 - #ifdef CONFIG_ACPI 785 - #include <linux/acpi.h> 786 - #include <linux/pci-acpi.h> 787 - static void __devinit msi_acpi_init(void) 788 - { 789 - if (acpi_pci_disabled) 790 - return; 791 - pci_osc_support_set(OSC_MSI_SUPPORT); 792 - pcie_osc_support_set(OSC_MSI_SUPPORT); 793 - } 794 - #else 795 - static inline void msi_acpi_init(void) { } 796 - #endif /* CONFIG_ACPI */ 797 - 798 - void __devinit msi_init(void) 799 - { 800 - if (!pci_msi_enable) 801 - return; 802 - msi_acpi_init(); 803 794 }
+38 -44
drivers/pci/pci-acpi.c
··· 24 24 acpi_handle handle; 25 25 u32 support_set; 26 26 u32 control_set; 27 + u32 control_query; 28 + int is_queried; 27 29 struct list_head sibiling; 28 30 }; 29 31 static LIST_HEAD(acpi_osc_data_list); 30 32 31 33 struct acpi_osc_args { 32 34 u32 capbuf[3]; 33 - u32 ctrl_result; 34 35 }; 35 36 36 37 static DEFINE_MUTEX(pci_acpi_lock); ··· 57 56 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; 58 57 59 58 static acpi_status acpi_run_osc(acpi_handle handle, 60 - struct acpi_osc_args *osc_args) 59 + struct acpi_osc_args *osc_args, u32 *retval) 61 60 { 62 61 acpi_status status; 63 62 struct acpi_object_list input; ··· 113 112 goto out_kfree; 114 113 } 115 114 out_success: 116 - osc_args->ctrl_result = 117 - *((u32 *)(out_obj->buffer.pointer + 8)); 115 + *retval = *((u32 *)(out_obj->buffer.pointer + 8)); 118 116 status = AE_OK; 119 117 120 118 out_kfree: ··· 121 121 return status; 122 122 } 123 123 124 - static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data, 125 - u32 *result) 124 + static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data) 126 125 { 127 126 acpi_status status; 128 - u32 support_set; 127 + u32 support_set, result; 129 128 struct acpi_osc_args osc_args; 130 129 131 130 /* do _OSC query for all possible controls */ ··· 133 134 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; 134 135 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; 135 136 136 - status = acpi_run_osc(osc_data->handle, &osc_args); 137 + status = acpi_run_osc(osc_data->handle, &osc_args, &result); 137 138 if (ACPI_SUCCESS(status)) { 138 139 osc_data->support_set = support_set; 139 - *result = osc_args.ctrl_result; 140 + osc_data->control_query = result; 141 + osc_data->is_queried = 1; 140 142 } 141 143 142 144 return status; 143 145 } 144 146 145 - static acpi_status acpi_query_osc(acpi_handle handle, 146 - u32 level, void *context, void **retval) 147 + /* 148 + * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature 
149 + * @flags: Bitmask of flags to support 150 + * 151 + * See the ACPI spec for the definition of the flags 152 + */ 153 + int pci_acpi_osc_support(acpi_handle handle, u32 flags) 147 154 { 148 155 acpi_status status; 149 - struct acpi_osc_data *osc_data; 150 - u32 flags = (unsigned long)context, dummy; 151 156 acpi_handle tmp; 157 + struct acpi_osc_data *osc_data; 158 + int rc = 0; 152 159 153 160 status = acpi_get_handle(handle, "_OSC", &tmp); 154 161 if (ACPI_FAILURE(status)) 155 - return AE_OK; 162 + return -ENOTTY; 156 163 157 164 mutex_lock(&pci_acpi_lock); 158 165 osc_data = acpi_get_osc_data(handle); 159 166 if (!osc_data) { 160 167 printk(KERN_ERR "acpi osc data array is full\n"); 168 + rc = -ENOMEM; 161 169 goto out; 162 170 } 163 171 164 - __acpi_query_osc(flags, osc_data, &dummy); 172 + __acpi_query_osc(flags, osc_data); 165 173 out: 166 174 mutex_unlock(&pci_acpi_lock); 167 - return AE_OK; 168 - } 169 - 170 - /** 171 - * __pci_osc_support_set - register OS support to Firmware 172 - * @flags: OS support bits 173 - * @hid: hardware ID 174 - * 175 - * Update OS support fields and doing a _OSC Query to obtain an update 176 - * from Firmware on supported control bits. 
177 - **/ 178 - acpi_status __pci_osc_support_set(u32 flags, const char *hid) 179 - { 180 - if (!(flags & OSC_SUPPORT_MASKS)) 181 - return AE_TYPE; 182 - 183 - acpi_get_devices(hid, acpi_query_osc, 184 - (void *)(unsigned long)flags, NULL); 185 - return AE_OK; 175 + return rc; 186 176 } 187 177 188 178 /** ··· 184 196 acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) 185 197 { 186 198 acpi_status status; 187 - u32 ctrlset, control_set, result; 199 + u32 control_req, control_set, result; 188 200 acpi_handle tmp; 189 201 struct acpi_osc_data *osc_data; 190 202 struct acpi_osc_args osc_args; ··· 201 213 goto out; 202 214 } 203 215 204 - ctrlset = (flags & OSC_CONTROL_MASKS); 205 - if (!ctrlset) { 216 + control_req = (flags & OSC_CONTROL_MASKS); 217 + if (!control_req) { 206 218 status = AE_TYPE; 207 219 goto out; 208 220 } 209 221 210 - status = __acpi_query_osc(osc_data->support_set, osc_data, &result); 211 - if (ACPI_FAILURE(status)) 222 + /* No need to evaluate _OSC if the control was already granted. 
*/ 223 + if ((osc_data->control_set & control_req) == control_req) 212 224 goto out; 213 225 214 - if ((result & ctrlset) != ctrlset) { 226 + if (!osc_data->is_queried) { 227 + status = __acpi_query_osc(osc_data->support_set, osc_data); 228 + if (ACPI_FAILURE(status)) 229 + goto out; 230 + } 231 + 232 + if ((osc_data->control_query & control_req) != control_req) { 215 233 status = AE_SUPPORT; 216 234 goto out; 217 235 } 218 236 219 - control_set = osc_data->control_set | ctrlset; 237 + control_set = osc_data->control_set | control_req; 220 238 osc_args.capbuf[OSC_QUERY_TYPE] = 0; 221 239 osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set; 222 240 osc_args.capbuf[OSC_CONTROL_TYPE] = control_set; 223 - status = acpi_run_osc(handle, &osc_args); 241 + status = acpi_run_osc(handle, &osc_args, &result); 224 242 if (ACPI_SUCCESS(status)) 225 - osc_data->control_set = control_set; 243 + osc_data->control_set = result; 226 244 out: 227 245 mutex_unlock(&pci_acpi_lock); 228 246 return status; ··· 369 375 * The string should be the same as root bridge's name 370 376 * Please look at 'pci_scan_bus_parented' 371 377 */ 372 - num = sscanf(dev->bus_id, "pci%04x:%02x", &seg, &bus); 378 + num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus); 373 379 if (num != 2) 374 380 return -ENODEV; 375 381 *handle = acpi_get_pci_rootbridge_handle(seg, bus);
+286 -196
drivers/pci/pci-driver.c
··· 16 16 #include <linux/string.h> 17 17 #include <linux/slab.h> 18 18 #include <linux/sched.h> 19 + #include <linux/cpu.h> 19 20 #include "pci.h" 20 21 21 22 /* ··· 49 48 subdevice=PCI_ANY_ID, class=0, class_mask=0; 50 49 unsigned long driver_data=0; 51 50 int fields=0; 52 - int retval; 51 + int retval=0; 53 52 54 53 fields = sscanf(buf, "%x %x %x %x %x %x %lx", 55 54 &vendor, &device, &subvendor, &subdevice, ··· 59 58 60 59 /* Only accept driver_data values that match an existing id_table 61 60 entry */ 62 - retval = -EINVAL; 63 - while (ids->vendor || ids->subvendor || ids->class_mask) { 64 - if (driver_data == ids->driver_data) { 65 - retval = 0; 66 - break; 61 + if (ids) { 62 + retval = -EINVAL; 63 + while (ids->vendor || ids->subvendor || ids->class_mask) { 64 + if (driver_data == ids->driver_data) { 65 + retval = 0; 66 + break; 67 + } 68 + ids++; 67 69 } 68 - ids++; 70 + if (retval) /* No match */ 71 + return retval; 69 72 } 70 - if (retval) /* No match */ 71 - return retval; 72 73 73 74 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 74 75 if (!dynid) ··· 186 183 return pci_match_id(drv->id_table, dev); 187 184 } 188 185 186 + struct drv_dev_and_id { 187 + struct pci_driver *drv; 188 + struct pci_dev *dev; 189 + const struct pci_device_id *id; 190 + }; 191 + 192 + static long local_pci_probe(void *_ddi) 193 + { 194 + struct drv_dev_and_id *ddi = _ddi; 195 + 196 + return ddi->drv->probe(ddi->dev, ddi->id); 197 + } 198 + 189 199 static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, 190 200 const struct pci_device_id *id) 191 201 { 192 - int error; 193 - #ifdef CONFIG_NUMA 194 - /* Execute driver initialization on node where the 195 - device's bus is attached to. This way the driver likely 196 - allocates its local memory on the right node without 197 - any need to change it. 
*/ 198 - struct mempolicy *oldpol; 199 - cpumask_t oldmask = current->cpus_allowed; 200 - int node = dev_to_node(&dev->dev); 202 + int error, node; 203 + struct drv_dev_and_id ddi = { drv, dev, id }; 201 204 205 + /* Execute driver initialization on node where the device's 206 + bus is attached to. This way the driver likely allocates 207 + its local memory on the right node without any need to 208 + change it. */ 209 + node = dev_to_node(&dev->dev); 202 210 if (node >= 0) { 211 + int cpu; 203 212 node_to_cpumask_ptr(nodecpumask, node); 204 - set_cpus_allowed_ptr(current, nodecpumask); 205 - } 206 - /* And set default memory allocation policy */ 207 - oldpol = current->mempolicy; 208 - current->mempolicy = NULL; /* fall back to system default policy */ 209 - #endif 210 - error = drv->probe(dev, id); 211 - #ifdef CONFIG_NUMA 212 - set_cpus_allowed_ptr(current, &oldmask); 213 - current->mempolicy = oldpol; 214 - #endif 213 + 214 + get_online_cpus(); 215 + cpu = cpumask_any_and(nodecpumask, cpu_online_mask); 216 + if (cpu < nr_cpu_ids) 217 + error = work_on_cpu(cpu, local_pci_probe, &ddi); 218 + else 219 + error = local_pci_probe(&ddi); 220 + put_online_cpus(); 221 + } else 222 + error = local_pci_probe(&ddi); 215 223 return error; 216 224 } 217 225 ··· 314 300 315 301 #ifdef CONFIG_PM_SLEEP 316 302 317 - static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) 318 - { 319 - struct pci_driver *drv = pci_dev->driver; 320 - 321 - return drv && (drv->suspend || drv->suspend_late || drv->resume 322 - || drv->resume_early); 323 - } 324 - 325 303 /* 326 304 * Default "suspend" method for devices that have no driver provided suspend, 327 - * or not even a driver at all. 305 + * or not even a driver at all (second part). 
328 306 */ 329 - static void pci_default_pm_suspend(struct pci_dev *pci_dev) 307 + static void pci_pm_set_unknown_state(struct pci_dev *pci_dev) 330 308 { 331 - pci_save_state(pci_dev); 332 309 /* 333 310 * mark its power state as "unknown", since we don't know if 334 311 * e.g. the BIOS will change its device state when we suspend. ··· 330 325 331 326 /* 332 327 * Default "resume" method for devices that have no driver provided resume, 333 - * or not even a driver at all (first part). 334 - */ 335 - static void pci_default_pm_resume_early(struct pci_dev *pci_dev) 336 - { 337 - /* restore the PCI config space */ 338 - pci_restore_state(pci_dev); 339 - } 340 - 341 - /* 342 - * Default "resume" method for devices that have no driver provided resume, 343 328 * or not even a driver at all (second part). 344 329 */ 345 - static int pci_default_pm_resume_late(struct pci_dev *pci_dev) 330 + static int pci_pm_reenable_device(struct pci_dev *pci_dev) 346 331 { 347 332 int retval; 348 333 ··· 358 363 i = drv->suspend(pci_dev, state); 359 364 suspend_report_result(drv->suspend, i); 360 365 } else { 361 - pci_default_pm_suspend(pci_dev); 366 + pci_save_state(pci_dev); 367 + /* 368 + * This is for compatibility with existing code with legacy PM 369 + * support. 
370 + */ 371 + pci_pm_set_unknown_state(pci_dev); 362 372 } 373 + 374 + pci_fixup_device(pci_fixup_suspend, pci_dev); 375 + 363 376 return i; 364 377 } 365 378 ··· 384 381 return i; 385 382 } 386 383 387 - static int pci_legacy_resume(struct device *dev) 388 - { 389 - int error; 390 - struct pci_dev * pci_dev = to_pci_dev(dev); 391 - struct pci_driver * drv = pci_dev->driver; 392 - 393 - if (drv && drv->resume) { 394 - error = drv->resume(pci_dev); 395 - } else { 396 - pci_default_pm_resume_early(pci_dev); 397 - error = pci_default_pm_resume_late(pci_dev); 398 - } 399 - return error; 400 - } 401 - 402 384 static int pci_legacy_resume_early(struct device *dev) 403 385 { 404 386 int error = 0; 405 387 struct pci_dev * pci_dev = to_pci_dev(dev); 406 388 struct pci_driver * drv = pci_dev->driver; 407 389 390 + pci_fixup_device(pci_fixup_resume_early, pci_dev); 391 + 408 392 if (drv && drv->resume_early) 409 393 error = drv->resume_early(pci_dev); 410 394 return error; 411 395 } 396 + 397 + static int pci_legacy_resume(struct device *dev) 398 + { 399 + int error; 400 + struct pci_dev * pci_dev = to_pci_dev(dev); 401 + struct pci_driver * drv = pci_dev->driver; 402 + 403 + pci_fixup_device(pci_fixup_resume, pci_dev); 404 + 405 + if (drv && drv->resume) { 406 + error = drv->resume(pci_dev); 407 + } else { 408 + /* restore the PCI config space */ 409 + pci_restore_state(pci_dev); 410 + error = pci_pm_reenable_device(pci_dev); 411 + } 412 + return error; 413 + } 414 + 415 + /* Auxiliary functions used by the new power management framework */ 416 + 417 + static int pci_restore_standard_config(struct pci_dev *pci_dev) 418 + { 419 + struct pci_dev *parent = pci_dev->bus->self; 420 + int error = 0; 421 + 422 + /* Check if the device's bus is operational */ 423 + if (!parent || parent->current_state == PCI_D0) { 424 + pci_restore_state(pci_dev); 425 + pci_update_current_state(pci_dev, PCI_D0); 426 + } else { 427 + dev_warn(&pci_dev->dev, "unable to restore config, " 428 + 
"bridge %s in low power state D%d\n", pci_name(parent), 429 + parent->current_state); 430 + pci_dev->current_state = PCI_UNKNOWN; 431 + error = -EAGAIN; 432 + } 433 + 434 + return error; 435 + } 436 + 437 + static bool pci_is_bridge(struct pci_dev *pci_dev) 438 + { 439 + return !!(pci_dev->subordinate); 440 + } 441 + 442 + static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 443 + { 444 + if (pci_restore_standard_config(pci_dev)) 445 + pci_fixup_device(pci_fixup_resume_early, pci_dev); 446 + } 447 + 448 + static int pci_pm_default_resume(struct pci_dev *pci_dev) 449 + { 450 + /* 451 + * pci_restore_standard_config() should have been called once already, 452 + * but it would have failed if the device's parent bridge had not been 453 + * in power state D0 at that time. Check it and try again if necessary. 454 + */ 455 + if (pci_dev->current_state == PCI_UNKNOWN) { 456 + int error = pci_restore_standard_config(pci_dev); 457 + if (error) 458 + return error; 459 + } 460 + 461 + pci_fixup_device(pci_fixup_resume, pci_dev); 462 + 463 + if (!pci_is_bridge(pci_dev)) 464 + pci_enable_wake(pci_dev, PCI_D0, false); 465 + 466 + return pci_pm_reenable_device(pci_dev); 467 + } 468 + 469 + static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev) 470 + { 471 + /* If device is enabled at this point, disable it */ 472 + pci_disable_enabled_device(pci_dev); 473 + /* 474 + * Save state with interrupts enabled, because in principle the bus the 475 + * device is on may be put into a low power state after this code runs. 
476 + */ 477 + pci_save_state(pci_dev); 478 + } 479 + 480 + static void pci_pm_default_suspend(struct pci_dev *pci_dev) 481 + { 482 + pci_pm_default_suspend_generic(pci_dev); 483 + 484 + if (!pci_is_bridge(pci_dev)) 485 + pci_prepare_to_sleep(pci_dev); 486 + 487 + pci_fixup_device(pci_fixup_suspend, pci_dev); 488 + } 489 + 490 + static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) 491 + { 492 + struct pci_driver *drv = pci_dev->driver; 493 + bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume 494 + || drv->resume_early); 495 + 496 + /* 497 + * Legacy PM support is used by default, so warn if the new framework is 498 + * supported as well. Drivers are supposed to support either the 499 + * former, or the latter, but not both at the same time. 500 + */ 501 + WARN_ON(ret && drv->driver.pm); 502 + 503 + return ret; 504 + } 505 + 506 + /* New power management framework */ 412 507 413 508 static int pci_pm_prepare(struct device *dev) 414 509 { ··· 535 434 struct device_driver *drv = dev->driver; 536 435 int error = 0; 537 436 538 - if (drv && drv->pm) { 539 - if (drv->pm->suspend) { 540 - error = drv->pm->suspend(dev); 541 - suspend_report_result(drv->pm->suspend, error); 542 - } 543 - } else if (pci_has_legacy_pm_support(pci_dev)) { 544 - error = pci_legacy_suspend(dev, PMSG_SUSPEND); 437 + if (pci_has_legacy_pm_support(pci_dev)) 438 + return pci_legacy_suspend(dev, PMSG_SUSPEND); 439 + 440 + if (drv && drv->pm && drv->pm->suspend) { 441 + error = drv->pm->suspend(dev); 442 + suspend_report_result(drv->pm->suspend, error); 545 443 } 546 - pci_fixup_device(pci_fixup_suspend, pci_dev); 444 + 445 + if (!error) 446 + pci_pm_default_suspend(pci_dev); 547 447 548 448 return error; 549 449 } ··· 555 453 struct device_driver *drv = dev->driver; 556 454 int error = 0; 557 455 558 - if (drv && drv->pm) { 559 - if (drv->pm->suspend_noirq) { 560 - error = drv->pm->suspend_noirq(dev); 561 - suspend_report_result(drv->pm->suspend_noirq, error); 562 - } 563 - 
} else if (pci_has_legacy_pm_support(pci_dev)) { 564 - error = pci_legacy_suspend_late(dev, PMSG_SUSPEND); 565 - } else { 566 - pci_default_pm_suspend(pci_dev); 456 + if (pci_has_legacy_pm_support(pci_dev)) 457 + return pci_legacy_suspend_late(dev, PMSG_SUSPEND); 458 + 459 + if (drv && drv->pm && drv->pm->suspend_noirq) { 460 + error = drv->pm->suspend_noirq(dev); 461 + suspend_report_result(drv->pm->suspend_noirq, error); 567 462 } 568 463 569 - return error; 570 - } 571 - 572 - static int pci_pm_resume(struct device *dev) 573 - { 574 - struct pci_dev *pci_dev = to_pci_dev(dev); 575 - struct device_driver *drv = dev->driver; 576 - int error = 0; 577 - 578 - pci_fixup_device(pci_fixup_resume, pci_dev); 579 - 580 - if (drv && drv->pm) { 581 - if (drv->pm->resume) 582 - error = drv->pm->resume(dev); 583 - } else if (pci_has_legacy_pm_support(pci_dev)) { 584 - error = pci_legacy_resume(dev); 585 - } else { 586 - error = pci_default_pm_resume_late(pci_dev); 587 - } 464 + if (!error) 465 + pci_pm_set_unknown_state(pci_dev); 588 466 589 467 return error; 590 468 } ··· 575 493 struct device_driver *drv = dev->driver; 576 494 int error = 0; 577 495 578 - pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev)); 496 + if (pci_has_legacy_pm_support(pci_dev)) 497 + return pci_legacy_resume_early(dev); 579 498 580 - if (drv && drv->pm) { 581 - if (drv->pm->resume_noirq) 582 - error = drv->pm->resume_noirq(dev); 583 - } else if (pci_has_legacy_pm_support(pci_dev)) { 584 - error = pci_legacy_resume_early(dev); 585 - } else { 586 - pci_default_pm_resume_early(pci_dev); 587 - } 499 + pci_pm_default_resume_noirq(pci_dev); 500 + 501 + if (drv && drv->pm && drv->pm->resume_noirq) 502 + error = drv->pm->resume_noirq(dev); 503 + 504 + return error; 505 + } 506 + 507 + static int pci_pm_resume(struct device *dev) 508 + { 509 + struct pci_dev *pci_dev = to_pci_dev(dev); 510 + struct device_driver *drv = dev->driver; 511 + int error = 0; 512 + 513 + if 
(pci_has_legacy_pm_support(pci_dev)) 514 + return pci_legacy_resume(dev); 515 + 516 + error = pci_pm_default_resume(pci_dev); 517 + 518 + if (!error && drv && drv->pm && drv->pm->resume) 519 + error = drv->pm->resume(dev); 588 520 589 521 return error; 590 522 } ··· 620 524 struct device_driver *drv = dev->driver; 621 525 int error = 0; 622 526 623 - if (drv && drv->pm) { 624 - if (drv->pm->freeze) { 625 - error = drv->pm->freeze(dev); 626 - suspend_report_result(drv->pm->freeze, error); 627 - } 628 - } else if (pci_has_legacy_pm_support(pci_dev)) { 629 - error = pci_legacy_suspend(dev, PMSG_FREEZE); 630 - pci_fixup_device(pci_fixup_suspend, pci_dev); 527 + if (pci_has_legacy_pm_support(pci_dev)) 528 + return pci_legacy_suspend(dev, PMSG_FREEZE); 529 + 530 + if (drv && drv->pm && drv->pm->freeze) { 531 + error = drv->pm->freeze(dev); 532 + suspend_report_result(drv->pm->freeze, error); 631 533 } 534 + 535 + if (!error) 536 + pci_pm_default_suspend_generic(pci_dev); 632 537 633 538 return error; 634 539 } ··· 640 543 struct device_driver *drv = dev->driver; 641 544 int error = 0; 642 545 643 - if (drv && drv->pm) { 644 - if (drv->pm->freeze_noirq) { 645 - error = drv->pm->freeze_noirq(dev); 646 - suspend_report_result(drv->pm->freeze_noirq, error); 647 - } 648 - } else if (pci_has_legacy_pm_support(pci_dev)) { 649 - error = pci_legacy_suspend_late(dev, PMSG_FREEZE); 650 - } else { 651 - pci_default_pm_suspend(pci_dev); 546 + if (pci_has_legacy_pm_support(pci_dev)) 547 + return pci_legacy_suspend_late(dev, PMSG_FREEZE); 548 + 549 + if (drv && drv->pm && drv->pm->freeze_noirq) { 550 + error = drv->pm->freeze_noirq(dev); 551 + suspend_report_result(drv->pm->freeze_noirq, error); 652 552 } 653 553 654 - return error; 655 - } 656 - 657 - static int pci_pm_thaw(struct device *dev) 658 - { 659 - struct pci_dev *pci_dev = to_pci_dev(dev); 660 - struct device_driver *drv = dev->driver; 661 - int error = 0; 662 - 663 - if (drv && drv->pm) { 664 - if (drv->pm->thaw) 665 - 
error = drv->pm->thaw(dev); 666 - } else if (pci_has_legacy_pm_support(pci_dev)) { 667 - pci_fixup_device(pci_fixup_resume, pci_dev); 668 - error = pci_legacy_resume(dev); 669 - } 554 + if (!error) 555 + pci_pm_set_unknown_state(pci_dev); 670 556 671 557 return error; 672 558 } ··· 660 580 struct device_driver *drv = dev->driver; 661 581 int error = 0; 662 582 663 - if (drv && drv->pm) { 664 - if (drv->pm->thaw_noirq) 665 - error = drv->pm->thaw_noirq(dev); 666 - } else if (pci_has_legacy_pm_support(pci_dev)) { 667 - pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev)); 668 - error = pci_legacy_resume_early(dev); 669 - } 583 + if (pci_has_legacy_pm_support(pci_dev)) 584 + return pci_legacy_resume_early(dev); 585 + 586 + pci_update_current_state(pci_dev, PCI_D0); 587 + 588 + if (drv && drv->pm && drv->pm->thaw_noirq) 589 + error = drv->pm->thaw_noirq(dev); 590 + 591 + return error; 592 + } 593 + 594 + static int pci_pm_thaw(struct device *dev) 595 + { 596 + struct pci_dev *pci_dev = to_pci_dev(dev); 597 + struct device_driver *drv = dev->driver; 598 + int error = 0; 599 + 600 + if (pci_has_legacy_pm_support(pci_dev)) 601 + return pci_legacy_resume(dev); 602 + 603 + pci_pm_reenable_device(pci_dev); 604 + 605 + if (drv && drv->pm && drv->pm->thaw) 606 + error = drv->pm->thaw(dev); 670 607 671 608 return error; 672 609 } ··· 694 597 struct device_driver *drv = dev->driver; 695 598 int error = 0; 696 599 697 - pci_fixup_device(pci_fixup_suspend, pci_dev); 600 + if (pci_has_legacy_pm_support(pci_dev)) 601 + return pci_legacy_suspend(dev, PMSG_HIBERNATE); 698 602 699 - if (drv && drv->pm) { 700 - if (drv->pm->poweroff) { 701 - error = drv->pm->poweroff(dev); 702 - suspend_report_result(drv->pm->poweroff, error); 703 - } 704 - } else if (pci_has_legacy_pm_support(pci_dev)) { 705 - error = pci_legacy_suspend(dev, PMSG_HIBERNATE); 603 + if (drv && drv->pm && drv->pm->poweroff) { 604 + error = drv->pm->poweroff(dev); 605 + suspend_report_result(drv->pm->poweroff, error); 
706 606 } 607 + 608 + if (!error) 609 + pci_pm_default_suspend(pci_dev); 707 610 708 611 return error; 709 612 } ··· 713 616 struct device_driver *drv = dev->driver; 714 617 int error = 0; 715 618 716 - if (drv && drv->pm) { 717 - if (drv->pm->poweroff_noirq) { 718 - error = drv->pm->poweroff_noirq(dev); 719 - suspend_report_result(drv->pm->poweroff_noirq, error); 720 - } 721 - } else if (pci_has_legacy_pm_support(to_pci_dev(dev))) { 722 - error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE); 619 + if (pci_has_legacy_pm_support(to_pci_dev(dev))) 620 + return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); 621 + 622 + if (drv && drv->pm && drv->pm->poweroff_noirq) { 623 + error = drv->pm->poweroff_noirq(dev); 624 + suspend_report_result(drv->pm->poweroff_noirq, error); 723 625 } 724 - 725 - return error; 726 - } 727 - 728 - static int pci_pm_restore(struct device *dev) 729 - { 730 - struct pci_dev *pci_dev = to_pci_dev(dev); 731 - struct device_driver *drv = dev->driver; 732 - int error = 0; 733 - 734 - if (drv && drv->pm) { 735 - if (drv->pm->restore) 736 - error = drv->pm->restore(dev); 737 - } else if (pci_has_legacy_pm_support(pci_dev)) { 738 - error = pci_legacy_resume(dev); 739 - } else { 740 - error = pci_default_pm_resume_late(pci_dev); 741 - } 742 - pci_fixup_device(pci_fixup_resume, pci_dev); 743 626 744 627 return error; 745 628 } ··· 730 653 struct device_driver *drv = dev->driver; 731 654 int error = 0; 732 655 733 - pci_fixup_device(pci_fixup_resume, pci_dev); 656 + if (pci_has_legacy_pm_support(pci_dev)) 657 + return pci_legacy_resume_early(dev); 734 658 735 - if (drv && drv->pm) { 736 - if (drv->pm->restore_noirq) 737 - error = drv->pm->restore_noirq(dev); 738 - } else if (pci_has_legacy_pm_support(pci_dev)) { 739 - error = pci_legacy_resume_early(dev); 740 - } else { 741 - pci_default_pm_resume_early(pci_dev); 742 - } 743 - pci_fixup_device(pci_fixup_resume_early, pci_dev); 659 + pci_pm_default_resume_noirq(pci_dev); 660 + 661 + if (drv && drv->pm && 
drv->pm->restore_noirq) 662 + error = drv->pm->restore_noirq(dev); 663 + 664 + return error; 665 + } 666 + 667 + static int pci_pm_restore(struct device *dev) 668 + { 669 + struct pci_dev *pci_dev = to_pci_dev(dev); 670 + struct device_driver *drv = dev->driver; 671 + int error = 0; 672 + 673 + if (pci_has_legacy_pm_support(pci_dev)) 674 + return pci_legacy_resume(dev); 675 + 676 + error = pci_pm_default_resume(pci_dev); 677 + 678 + if (!error && drv && drv->pm && drv->pm->restore) 679 + error = drv->pm->restore(dev); 744 680 745 681 return error; 746 682 }
+47
drivers/pci/pci-stub.c
··· 1 + /* pci-stub - simple stub driver to reserve a pci device 2 + * 3 + * Copyright (C) 2008 Red Hat, Inc. 4 + * Author: 5 + * Chris Wright 6 + * 7 + * This work is licensed under the terms of the GNU GPL, version 2. 8 + * 9 + * Usage is simple, allocate a new id to the stub driver and bind the 10 + * device to it. For example: 11 + * 12 + * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id 13 + * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind 14 + * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind 15 + * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver 16 + * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub 17 + */ 18 + 19 + #include <linux/module.h> 20 + #include <linux/pci.h> 21 + 22 + static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) 23 + { 24 + return 0; 25 + } 26 + 27 + static struct pci_driver stub_driver = { 28 + .name = "pci-stub", 29 + .id_table = NULL, /* only dynamic id's */ 30 + .probe = pci_stub_probe, 31 + }; 32 + 33 + static int __init pci_stub_init(void) 34 + { 35 + return pci_register_driver(&stub_driver); 36 + } 37 + 38 + static void __exit pci_stub_exit(void) 39 + { 40 + pci_unregister_driver(&stub_driver); 41 + } 42 + 43 + module_init(pci_stub_init); 44 + module_exit(pci_stub_exit); 45 + 46 + MODULE_LICENSE("GPL"); 47 + MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
+53 -62
drivers/pci/pci-sysfs.c
··· 58 58 const char *buf, size_t count) 59 59 { 60 60 struct pci_dev *pdev = to_pci_dev(dev); 61 - ssize_t consumed = -EINVAL; 61 + unsigned long val; 62 62 63 - if ((count > 0) && (*buf == '0' || *buf == '1')) { 64 - pdev->broken_parity_status = *buf == '1' ? 1 : 0; 65 - consumed = count; 66 - } 67 - return consumed; 63 + if (strict_strtoul(buf, 0, &val) < 0) 64 + return -EINVAL; 65 + 66 + pdev->broken_parity_status = !!val; 67 + 68 + return count; 68 69 } 69 70 70 71 static ssize_t local_cpus_show(struct device *dev, ··· 102 101 struct pci_dev * pci_dev = to_pci_dev(dev); 103 102 char * str = buf; 104 103 int i; 105 - int max = 7; 104 + int max; 106 105 resource_size_t start, end; 107 106 108 107 if (pci_dev->subordinate) 109 108 max = DEVICE_COUNT_RESOURCE; 109 + else 110 + max = PCI_BRIDGE_RESOURCES; 110 111 111 112 for (i = 0; i < max; i++) { 112 113 struct resource *res = &pci_dev->resource[i]; ··· 136 133 struct device_attribute *attr, const char *buf, 137 134 size_t count) 138 135 { 139 - ssize_t result = -EINVAL; 140 136 struct pci_dev *pdev = to_pci_dev(dev); 137 + unsigned long val; 138 + ssize_t result = strict_strtoul(buf, 0, &val); 139 + 140 + if (result < 0) 141 + return result; 141 142 142 143 /* this can crash the machine when done on the "wrong" device */ 143 144 if (!capable(CAP_SYS_ADMIN)) 144 - return count; 145 + return -EPERM; 145 146 146 - if (*buf == '0') { 147 + if (!val) { 147 148 if (atomic_read(&pdev->enable_cnt) != 0) 148 149 pci_disable_device(pdev); 149 150 else 150 151 result = -EIO; 151 - } else if (*buf == '1') 152 + } else 152 153 result = pci_enable_device(pdev); 153 154 154 155 return result < 0 ? 
result : count; ··· 192 185 const char *buf, size_t count) 193 186 { 194 187 struct pci_dev *pdev = to_pci_dev(dev); 188 + unsigned long val; 189 + 190 + if (strict_strtoul(buf, 0, &val) < 0) 191 + return -EINVAL; 195 192 196 193 /* bad things may happen if the no_msi flag is changed 197 194 * while some drivers are loaded */ 198 195 if (!capable(CAP_SYS_ADMIN)) 199 - return count; 196 + return -EPERM; 200 197 198 + /* Maybe pci devices without subordinate busses shouldn't even have this 199 + * attribute in the first place? */ 201 200 if (!pdev->subordinate) 202 201 return count; 203 202 204 - if (*buf == '0') { 205 - pdev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; 206 - dev_warn(&pdev->dev, "forced subordinate bus to not support MSI," 207 - " bad things could happen.\n"); 208 - } 203 + /* Is the flag going to change, or keep the value it already had? */ 204 + if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^ 205 + !!val) { 206 + pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI; 209 207 210 - if (*buf == '1') { 211 - pdev->subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI; 212 - dev_warn(&pdev->dev, "forced subordinate bus to support MSI," 213 - " bad things could happen.\n"); 208 + dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI," 209 + " bad things could happen\n", val ? 
"" : " not"); 214 210 } 215 211 216 212 return count; ··· 371 361 } 372 362 373 363 static ssize_t 374 - pci_read_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, 375 - char *buf, loff_t off, size_t count) 376 - { 377 - struct pci_dev *dev = 378 - to_pci_dev(container_of(kobj, struct device, kobj)); 379 - int end; 380 - int ret; 381 - 382 - if (off > bin_attr->size) 383 - count = 0; 384 - else if (count > bin_attr->size - off) 385 - count = bin_attr->size - off; 386 - end = off + count; 387 - 388 - while (off < end) { 389 - ret = dev->vpd->ops->read(dev, off, end - off, buf); 390 - if (ret < 0) 391 - return ret; 392 - buf += ret; 393 - off += ret; 394 - } 395 - 396 - return count; 397 - } 398 - 399 - static ssize_t 400 - pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, 364 + read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, 401 365 char *buf, loff_t off, size_t count) 402 366 { 403 367 struct pci_dev *dev = 404 368 to_pci_dev(container_of(kobj, struct device, kobj)); 405 - int end; 406 - int ret; 407 369 408 370 if (off > bin_attr->size) 409 371 count = 0; 410 372 else if (count > bin_attr->size - off) 411 373 count = bin_attr->size - off; 412 - end = off + count; 413 374 414 - while (off < end) { 415 - ret = dev->vpd->ops->write(dev, off, end - off, buf); 416 - if (ret < 0) 417 - return ret; 418 - buf += ret; 419 - off += ret; 420 - } 375 + return pci_read_vpd(dev, off, count, buf); 376 + } 421 377 422 - return count; 378 + static ssize_t 379 + write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr, 380 + char *buf, loff_t off, size_t count) 381 + { 382 + struct pci_dev *dev = 383 + to_pci_dev(container_of(kobj, struct device, kobj)); 384 + 385 + if (off > bin_attr->size) 386 + count = 0; 387 + else if (count > bin_attr->size - off) 388 + count = bin_attr->size - off; 389 + 390 + return pci_write_vpd(dev, off, count, buf); 423 391 } 424 392 425 393 #ifdef HAVE_PCI_LEGACY ··· 557 569 558 570 #ifdef 
HAVE_PCI_MMAP 559 571 560 - static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) 572 + int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) 561 573 { 562 574 unsigned long nr, start, size; 563 575 ··· 607 619 pci_resource_to_user(pdev, i, res, &start, &end); 608 620 vma->vm_pgoff += start >> PAGE_SHIFT; 609 621 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; 622 + 623 + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start)) 624 + return -EINVAL; 610 625 611 626 return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); 612 627 } ··· 823 832 attr->size = dev->vpd->len; 824 833 attr->attr.name = "vpd"; 825 834 attr->attr.mode = S_IRUSR | S_IWUSR; 826 - attr->read = pci_read_vpd; 827 - attr->write = pci_write_vpd; 835 + attr->read = read_vpd_attr; 836 + attr->write = write_vpd_attr; 828 837 retval = sysfs_create_bin_file(&dev->dev.kobj, attr); 829 838 if (retval) { 830 839 kfree(dev->vpd->attr);
+406 -107
drivers/pci/pci.c
··· 56 56 } 57 57 EXPORT_SYMBOL_GPL(pci_bus_max_busnr); 58 58 59 + #ifdef CONFIG_HAS_IOMEM 60 + void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) 61 + { 62 + /* 63 + * Make sure the BAR is actually a memory resource, not an IO resource 64 + */ 65 + if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 66 + WARN_ON(1); 67 + return NULL; 68 + } 69 + return ioremap_nocache(pci_resource_start(pdev, bar), 70 + pci_resource_len(pdev, bar)); 71 + } 72 + EXPORT_SYMBOL_GPL(pci_ioremap_bar); 73 + #endif 74 + 59 75 #if 0 60 76 /** 61 77 * pci_max_busnr - returns maximum PCI bus number ··· 376 360 static void 377 361 pci_restore_bars(struct pci_dev *dev) 378 362 { 379 - int i, numres; 363 + int i; 380 364 381 - switch (dev->hdr_type) { 382 - case PCI_HEADER_TYPE_NORMAL: 383 - numres = 6; 384 - break; 385 - case PCI_HEADER_TYPE_BRIDGE: 386 - numres = 2; 387 - break; 388 - case PCI_HEADER_TYPE_CARDBUS: 389 - numres = 1; 390 - break; 391 - default: 392 - /* Should never get here, but just in case... */ 393 - return; 394 - } 395 - 396 - for (i = 0; i < numres; i ++) 397 - pci_update_resource(dev, &dev->resource[i], i); 365 + for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) 366 + pci_update_resource(dev, i); 398 367 } 399 368 400 369 static struct pci_platform_pm_ops *pci_platform_pm; ··· 525 524 * pci_update_current_state - Read PCI power state of given device from its 526 525 * PCI PM registers and cache it 527 526 * @dev: PCI device to handle. 
527 + * @state: State to cache in case the device doesn't have the PM capability 528 528 */ 529 - static void pci_update_current_state(struct pci_dev *dev) 529 + void pci_update_current_state(struct pci_dev *dev, pci_power_t state) 530 530 { 531 531 if (dev->pm_cap) { 532 532 u16 pmcsr; 533 533 534 534 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 535 535 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); 536 + } else { 537 + dev->current_state = state; 536 538 } 537 539 } 538 540 ··· 578 574 */ 579 575 int ret = platform_pci_set_power_state(dev, PCI_D0); 580 576 if (!ret) 581 - pci_update_current_state(dev); 577 + pci_update_current_state(dev, PCI_D0); 582 578 } 583 579 /* This device is quirked not to be put into D3, so 584 580 don't put it in D3 */ ··· 591 587 /* Allow the platform to finalize the transition */ 592 588 int ret = platform_pci_set_power_state(dev, state); 593 589 if (!ret) { 594 - pci_update_current_state(dev); 590 + pci_update_current_state(dev, state); 595 591 error = 0; 596 592 } 597 593 } ··· 644 640 int pos, i = 0; 645 641 struct pci_cap_saved_state *save_state; 646 642 u16 *cap; 647 - int found = 0; 648 643 649 644 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 650 645 if (pos <= 0) 651 646 return 0; 652 647 653 648 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 654 - if (!save_state) 655 - save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL); 656 - else 657 - found = 1; 658 649 if (!save_state) { 659 - dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); 650 + dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 660 651 return -ENOMEM; 661 652 } 662 653 cap = (u16 *)&save_state->data[0]; ··· 660 661 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 661 662 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 662 663 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 663 - save_state->cap_nr = PCI_CAP_ID_EXP; 664 - if (!found) 665 - 
pci_add_saved_cap(dev, save_state); 664 + 666 665 return 0; 667 666 } 668 667 ··· 685 688 686 689 static int pci_save_pcix_state(struct pci_dev *dev) 687 690 { 688 - int pos, i = 0; 691 + int pos; 689 692 struct pci_cap_saved_state *save_state; 690 - u16 *cap; 691 - int found = 0; 692 693 693 694 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 694 695 if (pos <= 0) 695 696 return 0; 696 697 697 698 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); 698 - if (!save_state) 699 - save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL); 700 - else 701 - found = 1; 702 699 if (!save_state) { 703 - dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n"); 700 + dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__); 704 701 return -ENOMEM; 705 702 } 706 - cap = (u16 *)&save_state->data[0]; 707 703 708 - pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]); 709 - save_state->cap_nr = PCI_CAP_ID_PCIX; 710 - if (!found) 711 - pci_add_saved_cap(dev, save_state); 704 + pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data); 705 + 712 706 return 0; 713 707 } 714 708 ··· 970 982 */ 971 983 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} 972 984 985 + static void do_pci_disable_device(struct pci_dev *dev) 986 + { 987 + u16 pci_command; 988 + 989 + pci_read_config_word(dev, PCI_COMMAND, &pci_command); 990 + if (pci_command & PCI_COMMAND_MASTER) { 991 + pci_command &= ~PCI_COMMAND_MASTER; 992 + pci_write_config_word(dev, PCI_COMMAND, pci_command); 993 + } 994 + 995 + pcibios_disable_device(dev); 996 + } 997 + 998 + /** 999 + * pci_disable_enabled_device - Disable device without updating enable_cnt 1000 + * @dev: PCI device to disable 1001 + * 1002 + * NOTE: This function is a backend of PCI power management routines and is 1003 + * not supposed to be called drivers. 
1004 + */ 1005 + void pci_disable_enabled_device(struct pci_dev *dev) 1006 + { 1007 + if (atomic_read(&dev->enable_cnt)) 1008 + do_pci_disable_device(dev); 1009 + } 1010 + 973 1011 /** 974 1012 * pci_disable_device - Disable PCI device after use 975 1013 * @dev: PCI device to be disabled ··· 1010 996 pci_disable_device(struct pci_dev *dev) 1011 997 { 1012 998 struct pci_devres *dr; 1013 - u16 pci_command; 1014 999 1015 1000 dr = find_pci_dr(dev); 1016 1001 if (dr) ··· 1018 1005 if (atomic_sub_return(1, &dev->enable_cnt) != 0) 1019 1006 return; 1020 1007 1021 - pci_read_config_word(dev, PCI_COMMAND, &pci_command); 1022 - if (pci_command & PCI_COMMAND_MASTER) { 1023 - pci_command &= ~PCI_COMMAND_MASTER; 1024 - pci_write_config_word(dev, PCI_COMMAND, pci_command); 1025 - } 1026 - dev->is_busmaster = 0; 1008 + do_pci_disable_device(dev); 1027 1009 1028 - pcibios_disable_device(dev); 1010 + dev->is_busmaster = 0; 1029 1011 } 1030 1012 1031 1013 /** ··· 1115 1107 int error = 0; 1116 1108 bool pme_done = false; 1117 1109 1118 - if (!device_may_wakeup(&dev->dev)) 1110 + if (enable && !device_may_wakeup(&dev->dev)) 1119 1111 return -EINVAL; 1120 1112 1121 1113 /* ··· 1260 1252 /* find PCI PM capability in list */ 1261 1253 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 1262 1254 if (!pm) 1263 - return; 1255 + goto Exit; 1256 + 1264 1257 /* Check device's ability to generate PME# */ 1265 1258 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); 1266 1259 1267 1260 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 1268 1261 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", 1269 1262 pmc & PCI_PM_CAP_VER_MASK); 1270 - return; 1263 + goto Exit; 1271 1264 } 1272 1265 1273 1266 dev->pm_cap = pm; ··· 1307 1298 } else { 1308 1299 dev->pme_support = 0; 1309 1300 } 1301 + 1302 + Exit: 1303 + pci_update_current_state(dev, PCI_D0); 1304 + } 1305 + 1306 + /** 1307 + * platform_pci_wakeup_init - init platform wakeup if present 1308 + * @dev: PCI device 1309 + * 1310 + * Some devices don't 
have PCI PM caps but can still generate wakeup 1311 + * events through platform methods (like ACPI events). If @dev supports 1312 + * platform wakeup events, set the device flag to indicate as much. This 1313 + * may be redundant if the device also supports PCI PM caps, but double 1314 + * initialization should be safe in that case. 1315 + */ 1316 + void platform_pci_wakeup_init(struct pci_dev *dev) 1317 + { 1318 + if (!platform_pci_can_wakeup(dev)) 1319 + return; 1320 + 1321 + device_set_wakeup_capable(&dev->dev, true); 1322 + device_set_wakeup_enable(&dev->dev, false); 1323 + platform_pci_sleep_wake(dev, false); 1324 + } 1325 + 1326 + /** 1327 + * pci_add_save_buffer - allocate buffer for saving given capability registers 1328 + * @dev: the PCI device 1329 + * @cap: the capability to allocate the buffer for 1330 + * @size: requested size of the buffer 1331 + */ 1332 + static int pci_add_cap_save_buffer( 1333 + struct pci_dev *dev, char cap, unsigned int size) 1334 + { 1335 + int pos; 1336 + struct pci_cap_saved_state *save_state; 1337 + 1338 + pos = pci_find_capability(dev, cap); 1339 + if (pos <= 0) 1340 + return 0; 1341 + 1342 + save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); 1343 + if (!save_state) 1344 + return -ENOMEM; 1345 + 1346 + save_state->cap_nr = cap; 1347 + pci_add_saved_cap(dev, save_state); 1348 + 1349 + return 0; 1350 + } 1351 + 1352 + /** 1353 + * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities 1354 + * @dev: the PCI device 1355 + */ 1356 + void pci_allocate_cap_save_buffers(struct pci_dev *dev) 1357 + { 1358 + int error; 1359 + 1360 + error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16)); 1361 + if (error) 1362 + dev_err(&dev->dev, 1363 + "unable to preallocate PCI Express save buffer\n"); 1364 + 1365 + error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); 1366 + if (error) 1367 + dev_err(&dev->dev, 1368 + "unable to preallocate PCI-X save buffer\n"); 1310 1369 } 1311 1370 1312 
1371 /** ··· 1414 1337 bridge->ari_enabled = 1; 1415 1338 } 1416 1339 1340 + /** 1341 + * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge 1342 + * @dev: the PCI device 1343 + * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD) 1344 + * 1345 + * Perform INTx swizzling for a device behind one level of bridge. This is 1346 + * required by section 9.1 of the PCI-to-PCI bridge specification for devices 1347 + * behind bridges on add-in cards. 1348 + */ 1349 + u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin) 1350 + { 1351 + return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; 1352 + } 1353 + 1417 1354 int 1418 1355 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) 1419 1356 { ··· 1436 1345 pin = dev->pin; 1437 1346 if (!pin) 1438 1347 return -1; 1439 - pin--; 1348 + 1440 1349 while (dev->bus->self) { 1441 - pin = (pin + PCI_SLOT(dev->devfn)) % 4; 1350 + pin = pci_swizzle_interrupt_pin(dev, pin); 1442 1351 dev = dev->bus->self; 1443 1352 } 1444 1353 *bridge = dev; 1445 1354 return pin; 1355 + } 1356 + 1357 + /** 1358 + * pci_common_swizzle - swizzle INTx all the way to root bridge 1359 + * @dev: the PCI device 1360 + * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD) 1361 + * 1362 + * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI 1363 + * bridges all the way up to a PCI root bus. 1364 + */ 1365 + u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) 1366 + { 1367 + u8 pin = *pinp; 1368 + 1369 + while (dev->bus->self) { 1370 + pin = pci_swizzle_interrupt_pin(dev, pin); 1371 + dev = dev->bus->self; 1372 + } 1373 + *pinp = pin; 1374 + return PCI_SLOT(dev->devfn); 1446 1375 } 1447 1376 1448 1377 /** ··· 1506 1395 * Returns 0 on success, or %EBUSY on error. A warning 1507 1396 * message is also printed on failure. 
1508 1397 */ 1509 - int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 1398 + static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, 1399 + int exclusive) 1510 1400 { 1511 1401 struct pci_devres *dr; 1512 1402 ··· 1520 1408 goto err_out; 1521 1409 } 1522 1410 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 1523 - if (!request_mem_region(pci_resource_start(pdev, bar), 1524 - pci_resource_len(pdev, bar), res_name)) 1411 + if (!__request_mem_region(pci_resource_start(pdev, bar), 1412 + pci_resource_len(pdev, bar), res_name, 1413 + exclusive)) 1525 1414 goto err_out; 1526 1415 } 1527 1416 ··· 1541 1428 } 1542 1429 1543 1430 /** 1431 + * pci_request_region - Reserved PCI I/O and memory resource 1432 + * @pdev: PCI device whose resources are to be reserved 1433 + * @bar: BAR to be reserved 1434 + * @res_name: Name to be associated with resource. 1435 + * 1436 + * Mark the PCI region associated with PCI device @pdev BR @bar as 1437 + * being reserved by owner @res_name. Do not access any 1438 + * address inside the PCI regions unless this call returns 1439 + * successfully. 1440 + * 1441 + * Returns 0 on success, or %EBUSY on error. A warning 1442 + * message is also printed on failure. 1443 + */ 1444 + int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 1445 + { 1446 + return __pci_request_region(pdev, bar, res_name, 0); 1447 + } 1448 + 1449 + /** 1450 + * pci_request_region_exclusive - Reserved PCI I/O and memory resource 1451 + * @pdev: PCI device whose resources are to be reserved 1452 + * @bar: BAR to be reserved 1453 + * @res_name: Name to be associated with resource. 1454 + * 1455 + * Mark the PCI region associated with PCI device @pdev BR @bar as 1456 + * being reserved by owner @res_name. Do not access any 1457 + * address inside the PCI regions unless this call returns 1458 + * successfully. 1459 + * 1460 + * Returns 0 on success, or %EBUSY on error. 
A warning 1461 + * message is also printed on failure. 1462 + * 1463 + * The key difference that _exclusive makes it that userspace is 1464 + * explicitly not allowed to map the resource via /dev/mem or 1465 + * sysfs. 1466 + */ 1467 + int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) 1468 + { 1469 + return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); 1470 + } 1471 + /** 1544 1472 * pci_release_selected_regions - Release selected PCI I/O and memory resources 1545 1473 * @pdev: PCI device whose resources were previously reserved 1546 1474 * @bars: Bitmask of BARs to be released ··· 1598 1444 pci_release_region(pdev, i); 1599 1445 } 1600 1446 1601 - /** 1602 - * pci_request_selected_regions - Reserve selected PCI I/O and memory resources 1603 - * @pdev: PCI device whose resources are to be reserved 1604 - * @bars: Bitmask of BARs to be requested 1605 - * @res_name: Name to be associated with resource 1606 - */ 1607 - int pci_request_selected_regions(struct pci_dev *pdev, int bars, 1608 - const char *res_name) 1447 + int __pci_request_selected_regions(struct pci_dev *pdev, int bars, 1448 + const char *res_name, int excl) 1609 1449 { 1610 1450 int i; 1611 1451 1612 1452 for (i = 0; i < 6; i++) 1613 1453 if (bars & (1 << i)) 1614 - if(pci_request_region(pdev, i, res_name)) 1454 + if (__pci_request_region(pdev, i, res_name, excl)) 1615 1455 goto err_out; 1616 1456 return 0; 1617 1457 ··· 1615 1467 pci_release_region(pdev, i); 1616 1468 1617 1469 return -EBUSY; 1470 + } 1471 + 1472 + 1473 + /** 1474 + * pci_request_selected_regions - Reserve selected PCI I/O and memory resources 1475 + * @pdev: PCI device whose resources are to be reserved 1476 + * @bars: Bitmask of BARs to be requested 1477 + * @res_name: Name to be associated with resource 1478 + */ 1479 + int pci_request_selected_regions(struct pci_dev *pdev, int bars, 1480 + const char *res_name) 1481 + { 1482 + return __pci_request_selected_regions(pdev, bars, 
res_name, 0); 1483 + } 1484 + 1485 + int pci_request_selected_regions_exclusive(struct pci_dev *pdev, 1486 + int bars, const char *res_name) 1487 + { 1488 + return __pci_request_selected_regions(pdev, bars, res_name, 1489 + IORESOURCE_EXCLUSIVE); 1618 1490 } 1619 1491 1620 1492 /** ··· 1670 1502 } 1671 1503 1672 1504 /** 1505 + * pci_request_regions_exclusive - Reserved PCI I/O and memory resources 1506 + * @pdev: PCI device whose resources are to be reserved 1507 + * @res_name: Name to be associated with resource. 1508 + * 1509 + * Mark all PCI regions associated with PCI device @pdev as 1510 + * being reserved by owner @res_name. Do not access any 1511 + * address inside the PCI regions unless this call returns 1512 + * successfully. 1513 + * 1514 + * pci_request_regions_exclusive() will mark the region so that 1515 + * /dev/mem and the sysfs MMIO access will not be allowed. 1516 + * 1517 + * Returns 0 on success, or %EBUSY on error. A warning 1518 + * message is also printed on failure. 1519 + */ 1520 + int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) 1521 + { 1522 + return pci_request_selected_regions_exclusive(pdev, 1523 + ((1 << 6) - 1), res_name); 1524 + } 1525 + 1526 + static void __pci_set_master(struct pci_dev *dev, bool enable) 1527 + { 1528 + u16 old_cmd, cmd; 1529 + 1530 + pci_read_config_word(dev, PCI_COMMAND, &old_cmd); 1531 + if (enable) 1532 + cmd = old_cmd | PCI_COMMAND_MASTER; 1533 + else 1534 + cmd = old_cmd & ~PCI_COMMAND_MASTER; 1535 + if (cmd != old_cmd) { 1536 + dev_dbg(&dev->dev, "%s bus mastering\n", 1537 + enable ? 
"enabling" : "disabling"); 1538 + pci_write_config_word(dev, PCI_COMMAND, cmd); 1539 + } 1540 + dev->is_busmaster = enable; 1541 + } 1542 + 1543 + /** 1673 1544 * pci_set_master - enables bus-mastering for device dev 1674 1545 * @dev: the PCI device to enable 1675 1546 * 1676 1547 * Enables bus-mastering on the device and calls pcibios_set_master() 1677 1548 * to do the needed arch specific settings. 1678 1549 */ 1679 - void 1680 - pci_set_master(struct pci_dev *dev) 1550 + void pci_set_master(struct pci_dev *dev) 1681 1551 { 1682 - u16 cmd; 1683 - 1684 - pci_read_config_word(dev, PCI_COMMAND, &cmd); 1685 - if (! (cmd & PCI_COMMAND_MASTER)) { 1686 - dev_dbg(&dev->dev, "enabling bus mastering\n"); 1687 - cmd |= PCI_COMMAND_MASTER; 1688 - pci_write_config_word(dev, PCI_COMMAND, cmd); 1689 - } 1690 - dev->is_busmaster = 1; 1552 + __pci_set_master(dev, true); 1691 1553 pcibios_set_master(dev); 1554 + } 1555 + 1556 + /** 1557 + * pci_clear_master - disables bus-mastering for device dev 1558 + * @dev: the PCI device to disable 1559 + */ 1560 + void pci_clear_master(struct pci_dev *dev) 1561 + { 1562 + __pci_set_master(dev, false); 1692 1563 } 1693 1564 1694 1565 #ifdef PCI_DISABLE_MWI ··· 1958 1751 EXPORT_SYMBOL(pci_set_dma_seg_boundary); 1959 1752 #endif 1960 1753 1961 - /** 1962 - * pci_execute_reset_function() - Reset a PCI device function 1963 - * @dev: Device function to reset 1964 - * 1965 - * Some devices allow an individual function to be reset without affecting 1966 - * other functions in the same device. The PCI device must be responsive 1967 - * to PCI config space in order to use this function. 1968 - * 1969 - * The device function is presumed to be unused when this function is called. 1970 - * Resetting the device will make the contents of PCI configuration space 1971 - * random, so any caller of this must be prepared to reinitialise the 1972 - * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 1973 - * etc. 
1974 - * 1975 - * Returns 0 if the device function was successfully reset or -ENOTTY if the 1976 - * device doesn't support resetting a single function. 1977 - */ 1978 - int pci_execute_reset_function(struct pci_dev *dev) 1754 + static int __pcie_flr(struct pci_dev *dev, int probe) 1979 1755 { 1980 1756 u16 status; 1981 1757 u32 cap; ··· 1969 1779 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); 1970 1780 if (!(cap & PCI_EXP_DEVCAP_FLR)) 1971 1781 return -ENOTTY; 1782 + 1783 + if (probe) 1784 + return 0; 1972 1785 1973 1786 pci_block_user_cfg_access(dev); 1974 1787 ··· 1995 1802 pci_unblock_user_cfg_access(dev); 1996 1803 return 0; 1997 1804 } 1805 + 1806 + static int __pci_af_flr(struct pci_dev *dev, int probe) 1807 + { 1808 + int cappos = pci_find_capability(dev, PCI_CAP_ID_AF); 1809 + u8 status; 1810 + u8 cap; 1811 + 1812 + if (!cappos) 1813 + return -ENOTTY; 1814 + pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap); 1815 + if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 1816 + return -ENOTTY; 1817 + 1818 + if (probe) 1819 + return 0; 1820 + 1821 + pci_block_user_cfg_access(dev); 1822 + 1823 + /* Wait for Transaction Pending bit clean */ 1824 + msleep(100); 1825 + pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status); 1826 + if (status & PCI_AF_STATUS_TP) { 1827 + dev_info(&dev->dev, "Busy after 100ms while trying to" 1828 + " reset; sleeping for 1 second\n"); 1829 + ssleep(1); 1830 + pci_read_config_byte(dev, 1831 + cappos + PCI_AF_STATUS, &status); 1832 + if (status & PCI_AF_STATUS_TP) 1833 + dev_info(&dev->dev, "Still busy after 1s; " 1834 + "proceeding with reset anyway\n"); 1835 + } 1836 + pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); 1837 + mdelay(100); 1838 + 1839 + pci_unblock_user_cfg_access(dev); 1840 + return 0; 1841 + } 1842 + 1843 + static int __pci_reset_function(struct pci_dev *pdev, int probe) 1844 + { 1845 + int res; 1846 + 1847 + res = __pcie_flr(pdev, probe); 1848 + if (res != -ENOTTY) 1849 + return 
res; 1850 + 1851 + res = __pci_af_flr(pdev, probe); 1852 + if (res != -ENOTTY) 1853 + return res; 1854 + 1855 + return res; 1856 + } 1857 + 1858 + /** 1859 + * pci_execute_reset_function() - Reset a PCI device function 1860 + * @dev: Device function to reset 1861 + * 1862 + * Some devices allow an individual function to be reset without affecting 1863 + * other functions in the same device. The PCI device must be responsive 1864 + * to PCI config space in order to use this function. 1865 + * 1866 + * The device function is presumed to be unused when this function is called. 1867 + * Resetting the device will make the contents of PCI configuration space 1868 + * random, so any caller of this must be prepared to reinitialise the 1869 + * device including MSI, bus mastering, BARs, decoding IO and memory spaces, 1870 + * etc. 1871 + * 1872 + * Returns 0 if the device function was successfully reset or -ENOTTY if the 1873 + * device doesn't support resetting a single function. 1874 + */ 1875 + int pci_execute_reset_function(struct pci_dev *dev) 1876 + { 1877 + return __pci_reset_function(dev, 0); 1878 + } 1998 1879 EXPORT_SYMBOL_GPL(pci_execute_reset_function); 1999 1880 2000 1881 /** ··· 2089 1822 */ 2090 1823 int pci_reset_function(struct pci_dev *dev) 2091 1824 { 2092 - u32 cap; 2093 - int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); 2094 - int r; 1825 + int r = __pci_reset_function(dev, 1); 2095 1826 2096 - if (!exppos) 2097 - return -ENOTTY; 2098 - pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); 2099 - if (!(cap & PCI_EXP_DEVCAP_FLR)) 2100 - return -ENOTTY; 1827 + if (r < 0) 1828 + return r; 2101 1829 2102 1830 if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0) 2103 1831 disable_irq(dev->irq); ··· 2284 2022 return bars; 2285 2023 } 2286 2024 2025 + /** 2026 + * pci_resource_bar - get position of the BAR associated with a resource 2027 + * @dev: the PCI device 2028 + * @resno: the resource number 2029 + * @type: the BAR type to be filled 
in 2030 + * 2031 + * Returns BAR position in config space, or 0 if the BAR is invalid. 2032 + */ 2033 + int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type) 2034 + { 2035 + if (resno < PCI_ROM_RESOURCE) { 2036 + *type = pci_bar_unknown; 2037 + return PCI_BASE_ADDRESS_0 + 4 * resno; 2038 + } else if (resno == PCI_ROM_RESOURCE) { 2039 + *type = pci_bar_mem32; 2040 + return dev->rom_base_reg; 2041 + } 2042 + 2043 + dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); 2044 + return 0; 2045 + } 2046 + 2287 2047 static void __devinit pci_no_domains(void) 2288 2048 { 2289 2049 #ifdef CONFIG_PCI_DOMAINS 2290 2050 pci_domains_supported = 0; 2291 2051 #endif 2052 + } 2053 + 2054 + /** 2055 + * pci_ext_cfg_enabled - can we access extended PCI config space? 2056 + * @dev: The PCI device of the root bridge. 2057 + * 2058 + * Returns 1 if we can access PCI extended config space (offsets 2059 + * greater than 0xff). This is the default implementation. Architecture 2060 + * implementations can override this. 
2061 + */ 2062 + int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) 2063 + { 2064 + return 1; 2292 2065 } 2293 2066 2294 2067 static int __devinit pci_init(void) ··· 2333 2036 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2334 2037 pci_fixup_device(pci_fixup_final, dev); 2335 2038 } 2336 - 2337 - msi_init(); 2338 2039 2339 2040 return 0; 2340 2041 } ··· 2378 2083 EXPORT_SYMBOL(pci_bus_find_capability); 2379 2084 EXPORT_SYMBOL(pci_release_regions); 2380 2085 EXPORT_SYMBOL(pci_request_regions); 2086 + EXPORT_SYMBOL(pci_request_regions_exclusive); 2381 2087 EXPORT_SYMBOL(pci_release_region); 2382 2088 EXPORT_SYMBOL(pci_request_region); 2089 + EXPORT_SYMBOL(pci_request_region_exclusive); 2383 2090 EXPORT_SYMBOL(pci_release_selected_regions); 2384 2091 EXPORT_SYMBOL(pci_request_selected_regions); 2092 + EXPORT_SYMBOL(pci_request_selected_regions_exclusive); 2385 2093 EXPORT_SYMBOL(pci_set_master); 2094 + EXPORT_SYMBOL(pci_clear_master); 2386 2095 EXPORT_SYMBOL(pci_set_mwi); 2387 2096 EXPORT_SYMBOL(pci_try_set_mwi); 2388 2097 EXPORT_SYMBOL(pci_clear_mwi);
+26 -8
drivers/pci/pci.h
··· 10 10 extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); 11 11 extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); 12 12 extern void pci_cleanup_rom(struct pci_dev *dev); 13 + #ifdef HAVE_PCI_MMAP 14 + extern int pci_mmap_fits(struct pci_dev *pdev, int resno, 15 + struct vm_area_struct *vma); 16 + #endif 13 17 14 18 /** 15 19 * Firmware PM callbacks ··· 44 40 }; 45 41 46 42 extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); 43 + extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); 44 + extern void pci_disable_enabled_device(struct pci_dev *dev); 47 45 extern void pci_pm_init(struct pci_dev *dev); 46 + extern void platform_pci_wakeup_init(struct pci_dev *dev); 47 + extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); 48 48 49 49 extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 50 50 extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); ··· 58 50 extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val); 59 51 60 52 struct pci_vpd_ops { 61 - int (*read)(struct pci_dev *dev, int pos, int size, char *buf); 62 - int (*write)(struct pci_dev *dev, int pos, int size, const char *buf); 53 + ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 54 + ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 63 55 void (*release)(struct pci_dev *dev); 64 56 }; 65 57 66 58 struct pci_vpd { 67 59 unsigned int len; 68 - struct pci_vpd_ops *ops; 60 + const struct pci_vpd_ops *ops; 69 61 struct bin_attribute *attr; /* descriptor for sysfs VPD entry */ 70 62 }; 71 63 ··· 106 98 #ifdef CONFIG_PCI_MSI 107 99 void pci_no_msi(void); 108 100 extern void pci_msi_init_pci_dev(struct pci_dev *dev); 109 - extern void __devinit msi_init(void); 110 101 #else 111 102 static inline void pci_no_msi(void) { } 112 103 static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } 113 - static inline 
void msi_init(void) { } 114 104 #endif 115 105 116 106 #ifdef CONFIG_PCIEAER ··· 165 159 }; 166 160 #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) 167 161 162 + enum pci_bar_type { 163 + pci_bar_unknown, /* Standard PCI BAR probe */ 164 + pci_bar_io, /* An io port BAR */ 165 + pci_bar_mem32, /* A 32-bit memory BAR */ 166 + pci_bar_mem64, /* A 64-bit memory BAR */ 167 + }; 168 + 169 + extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 170 + struct resource *res, unsigned int reg); 171 + extern int pci_resource_bar(struct pci_dev *dev, int resno, 172 + enum pci_bar_type *type); 173 + extern int pci_bus_add_child(struct pci_bus *bus); 168 174 extern void pci_enable_ari(struct pci_dev *dev); 169 175 /** 170 176 * pci_ari_enabled - query ARI forwarding status 171 - * @dev: the PCI device 177 + * @bus: the PCI bus 172 178 * 173 179 * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; 174 180 */ 175 - static inline int pci_ari_enabled(struct pci_dev *dev) 181 + static inline int pci_ari_enabled(struct pci_bus *bus) 176 182 { 177 - return dev->ari_enabled; 183 + return bus->self && bus->self->ari_enabled; 178 184 } 179 185 180 186 #endif /* DRIVERS_PCI_H */
-1
drivers/pci/pcie/aer/aerdrv_acpi.c
··· 38 38 39 39 handle = acpi_find_root_bridge_handle(pdev); 40 40 if (handle) { 41 - pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); 42 41 status = pci_osc_control_set(handle, 43 42 OSC_PCI_EXPRESS_AER_CONTROL | 44 43 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+1 -1
drivers/pci/pcie/aer/aerdrv_errprint.c
··· 233 233 234 234 if (info->flags & AER_TLP_HEADER_VALID_FLAG) { 235 235 unsigned char *tlp = (unsigned char *) &info->tlp; 236 - printk("%sTLB Header:\n", loglevel); 236 + printk("%sTLP Header:\n", loglevel); 237 237 printk("%s%02x%02x%02x%02x %02x%02x%02x%02x" 238 238 " %02x%02x%02x%02x %02x%02x%02x%02x\n", 239 239 loglevel,
+126 -43
drivers/pci/pcie/aspm.c
··· 17 17 #include <linux/init.h> 18 18 #include <linux/slab.h> 19 19 #include <linux/jiffies.h> 20 + #include <linux/delay.h> 20 21 #include <linux/pci-aspm.h> 21 22 #include "../pci.h" 22 23 ··· 34 33 struct pcie_link_state { 35 34 struct list_head sibiling; 36 35 struct pci_dev *pdev; 36 + bool downstream_has_switch; 37 + 38 + struct pcie_link_state *parent; 39 + struct list_head children; 40 + struct list_head link; 37 41 38 42 /* ASPM state */ 39 43 unsigned int support_state; ··· 75 69 [POLICY_PERFORMANCE] = "performance", 76 70 [POLICY_POWERSAVE] = "powersave" 77 71 }; 72 + 73 + #define LINK_RETRAIN_TIMEOUT HZ 78 74 79 75 static int policy_to_aspm_state(struct pci_dev *pdev) 80 76 { ··· 133 125 link_state->clk_pm_enabled = !!enable; 134 126 } 135 127 136 - static void pcie_check_clock_pm(struct pci_dev *pdev) 128 + static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist) 137 129 { 138 130 int pos; 139 131 u32 reg32; ··· 157 149 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) 158 150 enabled = 0; 159 151 } 160 - link_state->clk_pm_capable = capable; 161 152 link_state->clk_pm_enabled = enabled; 162 153 link_state->bios_clk_state = enabled; 163 - pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); 154 + if (!blacklist) { 155 + link_state->clk_pm_capable = capable; 156 + pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev)); 157 + } else { 158 + link_state->clk_pm_capable = 0; 159 + pcie_set_clock_pm(pdev, 0); 160 + } 161 + } 162 + 163 + static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev) 164 + { 165 + struct pci_dev *child_dev; 166 + 167 + list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 168 + if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) 169 + return true; 170 + } 171 + return false; 164 172 } 165 173 166 174 /* ··· 241 217 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 242 218 243 219 /* Wait for link training end */ 244 - /* break out after waiting for 1 second */ 220 + /* break out after waiting 
for timeout */ 245 221 start_jiffies = jiffies; 246 - while ((jiffies - start_jiffies) < HZ) { 222 + for (;;) { 247 223 pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16); 248 224 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 249 225 break; 250 - cpu_relax(); 226 + if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) 227 + break; 228 + msleep(1); 251 229 } 252 230 /* training failed -> recover */ 253 - if ((jiffies - start_jiffies) >= HZ) { 231 + if (reg16 & PCI_EXP_LNKSTA_LT) { 254 232 dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure" 255 233 " common clock\n"); 256 234 i = 0; ··· 445 419 { 446 420 struct pci_dev *child_dev; 447 421 448 - /* If no child, disable the link */ 422 + /* If no child, ignore the link */ 449 423 if (list_empty(&pdev->subordinate->devices)) 450 - return 0; 424 + return state; 451 425 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) { 452 426 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 453 427 /* ··· 488 462 int valid = 1; 489 463 struct pcie_link_state *link_state = pdev->link_state; 490 464 465 + /* If no child, disable the link */ 466 + if (list_empty(&pdev->subordinate->devices)) 467 + state = 0; 491 468 /* 492 469 * if the downstream component has pci bridge function, don't do ASPM 493 470 * now ··· 522 493 link_state->enabled_state = state; 523 494 } 524 495 496 + static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link) 497 + { 498 + struct pcie_link_state *root_port_link = link; 499 + while (root_port_link->parent) 500 + root_port_link = root_port_link->parent; 501 + return root_port_link; 502 + } 503 + 504 + /* check the whole hierarchy, and configure each link in the hierarchy */ 525 505 static void __pcie_aspm_configure_link_state(struct pci_dev *pdev, 526 506 unsigned int state) 527 507 { 528 508 struct pcie_link_state *link_state = pdev->link_state; 509 + struct pcie_link_state *root_port_link = get_root_port_link(link_state); 510 + struct pcie_link_state 
*leaf; 529 511 530 - if (link_state->support_state == 0) 531 - return; 532 512 state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 533 513 534 - /* state 0 means disabling aspm */ 535 - state = pcie_aspm_check_state(pdev, state); 514 + /* check all links who have specific root port link */ 515 + list_for_each_entry(leaf, &link_list, sibiling) { 516 + if (!list_empty(&leaf->children) || 517 + get_root_port_link(leaf) != root_port_link) 518 + continue; 519 + state = pcie_aspm_check_state(leaf->pdev, state); 520 + } 521 + /* check root port link too in case it hasn't children */ 522 + state = pcie_aspm_check_state(root_port_link->pdev, state); 523 + 536 524 if (link_state->enabled_state == state) 537 525 return; 538 - __pcie_aspm_config_link(pdev, state); 526 + 527 + /* 528 + * we must change the hierarchy. See comments in 529 + * __pcie_aspm_config_link for the order 530 + **/ 531 + if (state & PCIE_LINK_STATE_L1) { 532 + list_for_each_entry(leaf, &link_list, sibiling) { 533 + if (get_root_port_link(leaf) == root_port_link) 534 + __pcie_aspm_config_link(leaf->pdev, state); 535 + } 536 + } else { 537 + list_for_each_entry_reverse(leaf, &link_list, sibiling) { 538 + if (get_root_port_link(leaf) == root_port_link) 539 + __pcie_aspm_config_link(leaf->pdev, state); 540 + } 541 + } 539 542 } 540 543 541 544 /* ··· 631 570 unsigned int state; 632 571 struct pcie_link_state *link_state; 633 572 int error = 0; 573 + int blacklist; 634 574 635 575 if (aspm_disabled || !pdev->is_pcie || pdev->link_state) 636 576 return; ··· 642 580 if (list_empty(&pdev->subordinate->devices)) 643 581 goto out; 644 582 645 - if (pcie_aspm_sanity_check(pdev)) 646 - goto out; 583 + blacklist = !!pcie_aspm_sanity_check(pdev); 647 584 648 585 mutex_lock(&aspm_lock); 649 586 650 587 link_state = kzalloc(sizeof(*link_state), GFP_KERNEL); 651 588 if (!link_state) 652 589 goto unlock_out; 590 + 591 + link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev); 592 + 
INIT_LIST_HEAD(&link_state->children); 593 + INIT_LIST_HEAD(&link_state->link); 594 + if (pdev->bus->self) {/* this is a switch */ 595 + struct pcie_link_state *parent_link_state; 596 + 597 + parent_link_state = pdev->bus->parent->self->link_state; 598 + if (!parent_link_state) { 599 + kfree(link_state); 600 + goto unlock_out; 601 + } 602 + list_add(&link_state->link, &parent_link_state->children); 603 + link_state->parent = parent_link_state; 604 + } 605 + 653 606 pdev->link_state = link_state; 654 607 655 - pcie_aspm_configure_common_clock(pdev); 656 - 657 - pcie_aspm_cap_init(pdev); 658 - 659 - /* config link state to avoid BIOS error */ 660 - state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev)); 661 - __pcie_aspm_config_link(pdev, state); 662 - 663 - pcie_check_clock_pm(pdev); 608 + if (!blacklist) { 609 + pcie_aspm_configure_common_clock(pdev); 610 + pcie_aspm_cap_init(pdev); 611 + } else { 612 + link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1; 613 + link_state->bios_aspm_state = 0; 614 + /* Set support state to 0, so we will disable ASPM later */ 615 + link_state->support_state = 0; 616 + } 664 617 665 618 link_state->pdev = pdev; 666 619 list_add(&link_state->sibiling, &link_list); 620 + 621 + if (link_state->downstream_has_switch) { 622 + /* 623 + * If link has switch, delay the link config. The leaf link 624 + * initialization will config the whole hierarchy. 
but we must 625 + * make sure BIOS doesn't set unsupported link state 626 + **/ 627 + state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state); 628 + __pcie_aspm_config_link(pdev, state); 629 + } else 630 + __pcie_aspm_configure_link_state(pdev, 631 + policy_to_aspm_state(pdev)); 632 + 633 + pcie_check_clock_pm(pdev, blacklist); 667 634 668 635 unlock_out: 669 636 if (error) ··· 726 635 /* All functions are removed, so just disable ASPM for the link */ 727 636 __pcie_aspm_config_one_dev(parent, 0); 728 637 list_del(&link_state->sibiling); 638 + list_del(&link_state->link); 729 639 /* Clock PM is for endpoint device */ 730 640 731 641 free_link_state(parent); ··· 949 857 aspm_disabled = 1; 950 858 } 951 859 952 - #ifdef CONFIG_ACPI 953 - #include <acpi/acpi_bus.h> 954 - #include <linux/pci-acpi.h> 955 - static void pcie_aspm_platform_init(void) 860 + /** 861 + * pcie_aspm_enabled - is PCIe ASPM enabled? 862 + * 863 + * Returns true if ASPM has not been disabled by the command-line option 864 + * pcie_aspm=off. 865 + **/ 866 + int pcie_aspm_enabled(void) 956 867 { 957 - pcie_osc_support_set(OSC_ACTIVE_STATE_PWR_SUPPORT| 958 - OSC_CLOCK_PWR_CAPABILITY_SUPPORT); 868 + return !aspm_disabled; 959 869 } 960 - #else 961 - static inline void pcie_aspm_platform_init(void) { } 962 - #endif 870 + EXPORT_SYMBOL(pcie_aspm_enabled); 963 871 964 - static int __init pcie_aspm_init(void) 965 - { 966 - if (aspm_disabled) 967 - return 0; 968 - pcie_aspm_platform_init(); 969 - return 0; 970 - } 971 - 972 - fs_initcall(pcie_aspm_init);
+4 -28
drivers/pci/pcie/portdrv_bus.c
··· 16 16 #include "portdrv.h" 17 17 18 18 static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); 19 - static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); 20 - static int pcie_port_bus_resume(struct device *dev); 21 19 22 20 struct bus_type pcie_port_bus_type = { 23 21 .name = "pci_express", 24 22 .match = pcie_port_bus_match, 25 - .suspend = pcie_port_bus_suspend, 26 - .resume = pcie_port_bus_resume, 27 23 }; 28 24 EXPORT_SYMBOL_GPL(pcie_port_bus_type); 29 25 ··· 45 49 return 1; 46 50 } 47 51 48 - static int pcie_port_bus_suspend(struct device *dev, pm_message_t state) 52 + int pcie_port_bus_register(void) 49 53 { 50 - struct pcie_device *pciedev; 51 - struct pcie_port_service_driver *driver; 52 - 53 - if (!dev || !dev->driver) 54 - return 0; 55 - 56 - pciedev = to_pcie_device(dev); 57 - driver = to_service_driver(dev->driver); 58 - if (driver && driver->suspend) 59 - driver->suspend(pciedev, state); 60 - return 0; 54 + return bus_register(&pcie_port_bus_type); 61 55 } 62 56 63 - static int pcie_port_bus_resume(struct device *dev) 57 + void pcie_port_bus_unregister(void) 64 58 { 65 - struct pcie_device *pciedev; 66 - struct pcie_port_service_driver *driver; 67 - 68 - if (!dev || !dev->driver) 69 - return 0; 70 - 71 - pciedev = to_pcie_device(dev); 72 - driver = to_service_driver(dev->driver); 73 - if (driver && driver->resume) 74 - driver->resume(pciedev); 75 - return 0; 59 + bus_unregister(&pcie_port_bus_type); 76 60 }
+145 -95
drivers/pci/pcie/portdrv_core.c
··· 19 19 20 20 extern int pcie_mch_quirk; /* MSI-quirk Indicator */ 21 21 22 - static int pcie_port_probe_service(struct device *dev) 23 - { 24 - struct pcie_device *pciedev; 25 - struct pcie_port_service_driver *driver; 26 - int status; 27 - 28 - if (!dev || !dev->driver) 29 - return -ENODEV; 30 - 31 - driver = to_service_driver(dev->driver); 32 - if (!driver || !driver->probe) 33 - return -ENODEV; 34 - 35 - pciedev = to_pcie_device(dev); 36 - status = driver->probe(pciedev, driver->id_table); 37 - if (!status) { 38 - dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", 39 - driver->name); 40 - get_device(dev); 41 - } 42 - return status; 43 - } 44 - 45 - static int pcie_port_remove_service(struct device *dev) 46 - { 47 - struct pcie_device *pciedev; 48 - struct pcie_port_service_driver *driver; 49 - 50 - if (!dev || !dev->driver) 51 - return 0; 52 - 53 - pciedev = to_pcie_device(dev); 54 - driver = to_service_driver(dev->driver); 55 - if (driver && driver->remove) { 56 - dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", 57 - driver->name); 58 - driver->remove(pciedev); 59 - put_device(dev); 60 - } 61 - return 0; 62 - } 63 - 64 - static void pcie_port_shutdown_service(struct device *dev) {} 65 - 66 - static int pcie_port_suspend_service(struct device *dev, pm_message_t state) 67 - { 68 - struct pcie_device *pciedev; 69 - struct pcie_port_service_driver *driver; 70 - 71 - if (!dev || !dev->driver) 72 - return 0; 73 - 74 - pciedev = to_pcie_device(dev); 75 - driver = to_service_driver(dev->driver); 76 - if (driver && driver->suspend) 77 - driver->suspend(pciedev, state); 78 - return 0; 79 - } 80 - 81 - static int pcie_port_resume_service(struct device *dev) 82 - { 83 - struct pcie_device *pciedev; 84 - struct pcie_port_service_driver *driver; 85 - 86 - if (!dev || !dev->driver) 87 - return 0; 88 - 89 - pciedev = to_pcie_device(dev); 90 - driver = to_service_driver(dev->driver); 91 - 92 - if (driver && driver->resume) 93 - driver->resume(pciedev); 94 
- return 0; 95 - } 96 - 97 - /* 98 - * release_pcie_device 99 - * 100 - * Being invoked automatically when device is being removed 101 - * in response to device_unregister(dev) call. 102 - * Release all resources being claimed. 22 + /** 23 + * release_pcie_device - free PCI Express port service device structure 24 + * @dev: Port service device to release 25 + * 26 + * Invoked automatically when device is being removed in response to 27 + * device_unregister(dev). Release all resources being claimed. 103 28 */ 104 29 static void release_pcie_device(struct device *dev) 105 30 { 106 - dev_printk(KERN_DEBUG, dev, "free port service\n"); 107 31 kfree(to_pcie_device(dev)); 108 32 } 109 33 ··· 52 128 } 53 129 return quirk; 54 130 } 55 - 131 + 132 + /** 133 + * assign_interrupt_mode - choose interrupt mode for PCI Express port services 134 + * (INTx, MSI-X, MSI) and set up vectors 135 + * @dev: PCI Express port to handle 136 + * @vectors: Array of interrupt vectors to populate 137 + * @mask: Bitmask of port capabilities returned by get_port_device_capability() 138 + * 139 + * Return value: Interrupt mode associated with the port 140 + */ 56 141 static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) 57 142 { 58 143 int i, pos, nvec, status = -EINVAL; ··· 83 150 if (pos) { 84 151 struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 85 152 {{0, 0}, {0, 1}, {0, 2}, {0, 3}}; 86 - dev_info(&dev->dev, "found MSI-X capability\n"); 87 153 status = pci_enable_msix(dev, msix_entries, nvec); 88 154 if (!status) { 89 155 int j = 0; ··· 97 165 if (status) { 98 166 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 99 167 if (pos) { 100 - dev_info(&dev->dev, "found MSI capability\n"); 101 168 status = pci_enable_msi(dev); 102 169 if (!status) { 103 170 interrupt_mode = PCIE_PORT_MSI_MODE; ··· 108 177 return interrupt_mode; 109 178 } 110 179 180 + /** 181 + * get_port_device_capability - discover capabilities of a PCI Express port 182 + * @dev: PCI Express 
port to examine 183 + * 184 + * The capabilities are read from the port's PCI Express configuration registers 185 + * as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and 186 + * 7.9 - 7.11. 187 + * 188 + * Return value: Bitmask of discovered port capabilities 189 + */ 111 190 static int get_port_device_capability(struct pci_dev *dev) 112 191 { 113 192 int services = 0, pos; ··· 145 204 return services; 146 205 } 147 206 207 + /** 208 + * pcie_device_init - initialize PCI Express port service device 209 + * @dev: Port service device to initialize 210 + * @parent: PCI Express port to associate the service device with 211 + * @port_type: Type of the port 212 + * @service_type: Type of service to associate with the service device 213 + * @irq: Interrupt vector to associate with the service device 214 + * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI) 215 + */ 148 216 static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 149 217 int port_type, int service_type, int irq, int irq_mode) 150 218 { ··· 174 224 device->driver = NULL; 175 225 device->driver_data = NULL; 176 226 device->release = release_pcie_device; /* callback to free pcie dev */ 177 - snprintf(device->bus_id, sizeof(device->bus_id), "%s:pcie%02x", 227 + dev_set_name(device, "%s:pcie%02x", 178 228 pci_name(parent), get_descriptor_id(port_type, service_type)); 179 229 device->parent = &parent->dev; 180 230 } 181 231 232 + /** 233 + * alloc_pcie_device - allocate PCI Express port service device structure 234 + * @parent: PCI Express port to associate the service device with 235 + * @port_type: Type of the port 236 + * @service_type: Type of service to associate with the service device 237 + * @irq: Interrupt vector to associate with the service device 238 + * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI) 239 + */ 182 240 static struct pcie_device* alloc_pcie_device(struct pci_dev *parent, 183 241 int port_type, int service_type, int 
irq, int irq_mode) 184 242 { ··· 197 239 return NULL; 198 240 199 241 pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); 200 - dev_printk(KERN_DEBUG, &device->device, "allocate port service\n"); 201 242 return device; 202 243 } 203 244 245 + /** 246 + * pcie_port_device_probe - check if device is a PCI Express port 247 + * @dev: Device to check 248 + */ 204 249 int pcie_port_device_probe(struct pci_dev *dev) 205 250 { 206 251 int pos, type; ··· 221 260 return -ENODEV; 222 261 } 223 262 263 + /** 264 + * pcie_port_device_register - register PCI Express port 265 + * @dev: PCI Express port to register 266 + * 267 + * Allocate the port extension structure and register services associated with 268 + * the port. 269 + */ 224 270 int pcie_port_device_register(struct pci_dev *dev) 225 271 { 226 272 struct pcie_port_device_ext *p_ext; ··· 291 323 return 0; 292 324 } 293 325 326 + /** 327 + * pcie_port_device_suspend - suspend port services associated with a PCIe port 328 + * @dev: PCI Express port to handle 329 + * @state: Representation of system power management transition in progress 330 + */ 294 331 int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state) 295 332 { 296 333 return device_for_each_child(&dev->dev, &state, suspend_iter); ··· 314 341 return 0; 315 342 } 316 343 344 + /** 345 + * pcie_port_device_suspend - resume port services associated with a PCIe port 346 + * @dev: PCI Express port to handle 347 + */ 317 348 int pcie_port_device_resume(struct pci_dev *dev) 318 349 { 319 350 return device_for_each_child(&dev->dev, NULL, resume_iter); ··· 340 363 return 0; 341 364 } 342 365 366 + /** 367 + * pcie_port_device_remove - unregister PCI Express port service devices 368 + * @dev: PCI Express port the service devices to unregister are associated with 369 + * 370 + * Remove PCI Express port service devices associated with given port and 371 + * disable MSI-X or MSI for the port. 
372 + */ 343 373 void pcie_port_device_remove(struct pci_dev *dev) 344 374 { 345 375 struct device *device; ··· 370 386 pci_disable_msi(dev); 371 387 } 372 388 373 - int pcie_port_bus_register(void) 389 + /** 390 + * pcie_port_probe_service - probe driver for given PCI Express port service 391 + * @dev: PCI Express port service device to probe against 392 + * 393 + * If PCI Express port service driver is registered with 394 + * pcie_port_service_register(), this function will be called by the driver core 395 + * whenever match is found between the driver and a port service device. 396 + */ 397 + static int pcie_port_probe_service(struct device *dev) 374 398 { 375 - return bus_register(&pcie_port_bus_type); 399 + struct pcie_device *pciedev; 400 + struct pcie_port_service_driver *driver; 401 + int status; 402 + 403 + if (!dev || !dev->driver) 404 + return -ENODEV; 405 + 406 + driver = to_service_driver(dev->driver); 407 + if (!driver || !driver->probe) 408 + return -ENODEV; 409 + 410 + pciedev = to_pcie_device(dev); 411 + status = driver->probe(pciedev, driver->id_table); 412 + if (!status) { 413 + dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", 414 + driver->name); 415 + get_device(dev); 416 + } 417 + return status; 376 418 } 377 419 378 - void pcie_port_bus_unregister(void) 420 + /** 421 + * pcie_port_remove_service - detach driver from given PCI Express port service 422 + * @dev: PCI Express port service device to handle 423 + * 424 + * If PCI Express port service driver is registered with 425 + * pcie_port_service_register(), this function will be called by the driver core 426 + * when device_unregister() is called for the port service device associated 427 + * with the driver. 
428 + */ 429 + static int pcie_port_remove_service(struct device *dev) 379 430 { 380 - bus_unregister(&pcie_port_bus_type); 431 + struct pcie_device *pciedev; 432 + struct pcie_port_service_driver *driver; 433 + 434 + if (!dev || !dev->driver) 435 + return 0; 436 + 437 + pciedev = to_pcie_device(dev); 438 + driver = to_service_driver(dev->driver); 439 + if (driver && driver->remove) { 440 + dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", 441 + driver->name); 442 + driver->remove(pciedev); 443 + put_device(dev); 444 + } 445 + return 0; 381 446 } 382 447 448 + /** 449 + * pcie_port_shutdown_service - shut down given PCI Express port service 450 + * @dev: PCI Express port service device to handle 451 + * 452 + * If PCI Express port service driver is registered with 453 + * pcie_port_service_register(), this function will be called by the driver core 454 + * when device_shutdown() is called for the port service device associated 455 + * with the driver. 456 + */ 457 + static void pcie_port_shutdown_service(struct device *dev) {} 458 + 459 + /** 460 + * pcie_port_service_register - register PCI Express port service driver 461 + * @new: PCI Express port service driver to register 462 + */ 383 463 int pcie_port_service_register(struct pcie_port_service_driver *new) 384 464 { 385 465 new->driver.name = (char *)new->name; ··· 451 403 new->driver.probe = pcie_port_probe_service; 452 404 new->driver.remove = pcie_port_remove_service; 453 405 new->driver.shutdown = pcie_port_shutdown_service; 454 - new->driver.suspend = pcie_port_suspend_service; 455 - new->driver.resume = pcie_port_resume_service; 456 406 457 407 return driver_register(&new->driver); 458 408 } 459 409 460 - void pcie_port_service_unregister(struct pcie_port_service_driver *new) 410 + /** 411 + * pcie_port_service_unregister - unregister PCI Express port service driver 412 + * @drv: PCI Express port service driver to unregister 413 + */ 414 + void pcie_port_service_unregister(struct 
pcie_port_service_driver *drv) 461 415 { 462 - driver_unregister(&new->driver); 416 + driver_unregister(&drv->driver); 463 417 } 464 418 465 419 EXPORT_SYMBOL(pcie_port_service_register);
+16 -5
drivers/pci/pcie/portdrv_pci.c
··· 41 41 { 42 42 int retval; 43 43 44 - pci_restore_state(dev); 45 44 retval = pci_enable_device(dev); 46 45 if (retval) 47 46 return retval; ··· 51 52 #ifdef CONFIG_PM 52 53 static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state) 53 54 { 54 - int ret = pcie_port_device_suspend(dev, state); 55 + return pcie_port_device_suspend(dev, state); 55 56 56 - if (!ret) 57 - ret = pcie_portdrv_save_config(dev); 58 - return ret; 57 + } 58 + 59 + static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state) 60 + { 61 + return pci_save_state(dev); 62 + } 63 + 64 + static int pcie_portdrv_resume_early(struct pci_dev *dev) 65 + { 66 + return pci_restore_state(dev); 59 67 } 60 68 61 69 static int pcie_portdrv_resume(struct pci_dev *dev) ··· 72 66 } 73 67 #else 74 68 #define pcie_portdrv_suspend NULL 69 + #define pcie_portdrv_suspend_late NULL 70 + #define pcie_portdrv_resume_early NULL 75 71 #define pcie_portdrv_resume NULL 76 72 #endif 77 73 ··· 229 221 230 222 /* If fatal, restore cfg space for possible link reset at upstream */ 231 223 if (dev->error_state == pci_channel_io_frozen) { 224 + pci_restore_state(dev); 232 225 pcie_portdrv_restore_config(dev); 233 226 pci_enable_pcie_error_reporting(dev); 234 227 } ··· 292 283 .remove = pcie_portdrv_remove, 293 284 294 285 .suspend = pcie_portdrv_suspend, 286 + .suspend_late = pcie_portdrv_suspend_late, 287 + .resume_early = pcie_portdrv_resume_early, 295 288 .resume = pcie_portdrv_resume, 296 289 297 290 .err_handler = &pcie_portdrv_err_handler,
+23 -17
drivers/pci/probe.c
··· 135 135 return size; 136 136 } 137 137 138 - enum pci_bar_type { 139 - pci_bar_unknown, /* Standard PCI BAR probe */ 140 - pci_bar_io, /* An io port BAR */ 141 - pci_bar_mem32, /* A 32-bit memory BAR */ 142 - pci_bar_mem64, /* A 64-bit memory BAR */ 143 - }; 144 - 145 138 static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) 146 139 { 147 140 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { ··· 149 156 return pci_bar_mem32; 150 157 } 151 158 152 - /* 153 - * If the type is not unknown, we assume that the lowest bit is 'enable'. 154 - * Returns 1 if the BAR was 64-bit and 0 if it was 32-bit. 159 + /** 160 + * pci_read_base - read a PCI BAR 161 + * @dev: the PCI device 162 + * @type: type of the BAR 163 + * @res: resource buffer to be filled in 164 + * @pos: BAR position in the config space 165 + * 166 + * Returns 1 if the BAR is 64-bit, or 0 if 32-bit. 155 167 */ 156 - static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 168 + int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, 157 169 struct resource *res, unsigned int pos) 158 170 { 159 171 u32 l, sz, mask; ··· 398 400 if (!child) 399 401 return NULL; 400 402 401 - child->self = bridge; 402 403 child->parent = parent; 403 404 child->ops = parent->ops; 404 405 child->sysdata = parent->sysdata; 405 406 child->bus_flags = parent->bus_flags; 406 - child->bridge = get_device(&bridge->dev); 407 407 408 408 /* initialize some portions of the bus device, but don't register it 409 409 * now as the parent is not properly set up yet. 
This device will get 410 410 * registered later in pci_bus_add_devices() 411 411 */ 412 412 child->dev.class = &pcibus_class; 413 - sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr); 413 + dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); 414 414 415 415 /* 416 416 * Set up the primary, secondary and subordinate ··· 418 422 child->primary = parent->secondary; 419 423 child->subordinate = 0xff; 420 424 425 + if (!bridge) 426 + return child; 427 + 428 + child->self = bridge; 429 + child->bridge = get_device(&bridge->dev); 430 + 421 431 /* Set up default resource pointers and names.. */ 422 - for (i = 0; i < 4; i++) { 432 + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { 423 433 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; 424 434 child->resource[i]->name = child->name; 425 435 } ··· 960 958 /* MSI/MSI-X list */ 961 959 pci_msi_init_pci_dev(dev); 962 960 961 + /* Buffers for saving PCIe and PCI-X capabilities */ 962 + pci_allocate_cap_save_buffers(dev); 963 + 963 964 /* Power Management */ 964 965 pci_pm_init(dev); 966 + platform_pci_wakeup_init(dev); 965 967 966 968 /* Vital Product Data */ 967 969 pci_vpd_pci22_init(dev); ··· 1136 1130 memset(dev, 0, sizeof(*dev)); 1137 1131 dev->parent = parent; 1138 1132 dev->release = pci_release_bus_bridge_dev; 1139 - sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus); 1133 + dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); 1140 1134 error = device_register(dev); 1141 1135 if (error) 1142 1136 goto dev_reg_err; ··· 1147 1141 1148 1142 b->dev.class = &pcibus_class; 1149 1143 b->dev.parent = b->bridge; 1150 - sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus); 1144 + dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus); 1151 1145 error = device_register(&b->dev); 1152 1146 if (error) 1153 1147 goto class_dev_reg_err;
+14 -4
drivers/pci/proc.c
··· 252 252 const struct proc_dir_entry *dp = PDE(inode); 253 253 struct pci_dev *dev = dp->data; 254 254 struct pci_filp_private *fpriv = file->private_data; 255 - int ret; 255 + int i, ret; 256 256 257 257 if (!capable(CAP_SYS_RAWIO)) 258 258 return -EPERM; 259 + 260 + /* Make sure the caller is mapping a real resource for this device */ 261 + for (i = 0; i < PCI_ROM_RESOURCE; i++) { 262 + if (pci_mmap_fits(dev, i, vma)) 263 + break; 264 + } 265 + 266 + if (i >= PCI_ROM_RESOURCE) 267 + return -ENODEV; 259 268 260 269 ret = pci_mmap_page_range(dev, vma, 261 270 fpriv->mmap_state, ··· 361 352 dev->vendor, 362 353 dev->device, 363 354 dev->irq); 364 - /* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */ 365 - for (i=0; i<7; i++) { 355 + 356 + /* only print standard and ROM resources to preserve compatibility */ 357 + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 366 358 resource_size_t start, end; 367 359 pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); 368 360 seq_printf(m, "\t%16llx", 369 361 (unsigned long long)(start | 370 362 (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); 371 363 } 372 - for (i=0; i<7; i++) { 364 + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 373 365 resource_size_t start, end; 374 366 pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); 375 367 seq_printf(m, "\t%16llx",
+94 -18
drivers/pci/quirks.c
··· 56 56 while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { 57 57 pci_read_config_byte(d, 0x82, &dlc); 58 58 if (!(dlc & 1<<1)) { 59 - dev_err(&d->dev, "PIIX3: Enabling Passive Release\n"); 59 + dev_info(&d->dev, "PIIX3: Enabling Passive Release\n"); 60 60 dlc |= 1<<1; 61 61 pci_write_config_byte(d, 0x82, dlc); 62 62 } ··· 449 449 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi); 450 450 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi); 451 451 452 - static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) 452 + static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) 453 453 { 454 454 u32 region; 455 455 ··· 459 459 pci_read_config_dword(dev, 0x48, &region); 460 460 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); 461 461 } 462 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi); 463 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi); 464 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi); 465 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi); 466 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi); 467 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi); 468 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi); 469 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi); 470 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi); 471 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi); 472 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, 
quirk_ich6_lpc_acpi); 473 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi); 474 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi); 475 - DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi); 462 + 463 + static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) 464 + { 465 + u32 val; 466 + u32 size, base; 467 + 468 + pci_read_config_dword(dev, reg, &val); 469 + 470 + /* Enabled? */ 471 + if (!(val & 1)) 472 + return; 473 + base = val & 0xfffc; 474 + if (dynsize) { 475 + /* 476 + * This is not correct. It is 16, 32 or 64 bytes depending on 477 + * register D31:F0:ADh bits 5:4. 478 + * 479 + * But this gets us at least _part_ of it. 480 + */ 481 + size = 16; 482 + } else { 483 + size = 128; 484 + } 485 + base &= ~(size-1); 486 + 487 + /* Just print it out for now. We should reserve it after more debugging */ 488 + dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); 489 + } 490 + 491 + static void __devinit quirk_ich6_lpc(struct pci_dev *dev) 492 + { 493 + /* Shared ACPI/GPIO decode with all ICH6+ */ 494 + ich6_lpc_acpi_gpio(dev); 495 + 496 + /* ICH6-specific generic IO decode */ 497 + ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0); 498 + ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1); 499 + } 500 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc); 501 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc); 502 + 503 + static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name) 504 + { 505 + u32 val; 506 + u32 mask, base; 507 + 508 + pci_read_config_dword(dev, reg, &val); 509 + 510 + /* Enabled? 
*/ 511 + if (!(val & 1)) 512 + return; 513 + 514 + /* 515 + * IO base in bits 15:2, mask in bits 23:18, both 516 + * are dword-based 517 + */ 518 + base = val & 0xfffc; 519 + mask = (val >> 16) & 0xfc; 520 + mask |= 3; 521 + 522 + /* Just print it out for now. We should reserve it after more debugging */ 523 + dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); 524 + } 525 + 526 + /* ICH7-10 has the same common LPC generic IO decode registers */ 527 + static void __devinit quirk_ich7_lpc(struct pci_dev *dev) 528 + { 529 + /* We share the common ACPI/DPIO decode with ICH6 */ 530 + ich6_lpc_acpi_gpio(dev); 531 + 532 + /* And have 4 ICH7+ generic decodes */ 533 + ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1"); 534 + ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2"); 535 + ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3"); 536 + ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4"); 537 + } 538 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc); 539 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc); 540 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc); 541 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc); 542 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc); 543 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc); 544 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc); 545 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc); 546 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc); 547 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc); 548 + 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc); 549 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc); 550 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc); 476 551 477 552 /* 478 553 * VIA ACPI: One IO region pointed to by longword at ··· 2149 2074 2150 2075 #endif /* CONFIG_PCI_MSI */ 2151 2076 2152 - static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) 2077 + static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2078 + struct pci_fixup *end) 2153 2079 { 2154 2080 while (f < end) { 2155 2081 if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && 2156 - (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { 2082 + (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { 2157 2083 dev_dbg(&dev->dev, "calling %pF\n", f->hook); 2158 2084 f->hook(dev); 2159 2085 }
+2 -3
drivers/pci/setup-bus.c
··· 536 536 if (!res) 537 537 continue; 538 538 539 - printk(KERN_INFO "bus: %02x index %x %s: %pR\n", 540 - bus->number, i, 541 - (res->flags & IORESOURCE_IO) ? "io port" : "mmio", res); 539 + dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i, 540 + (res->flags & IORESOURCE_IO) ? "io: " : "mem:", res); 542 541 } 543 542 } 544 543
+11 -13
drivers/pci/setup-res.c
··· 26 26 #include "pci.h" 27 27 28 28 29 - void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) 29 + void pci_update_resource(struct pci_dev *dev, int resno) 30 30 { 31 31 struct pci_bus_region region; 32 32 u32 new, check, mask; 33 33 int reg; 34 + enum pci_bar_type type; 35 + struct resource *res = dev->resource + resno; 34 36 35 37 /* 36 38 * Ignore resources for unimplemented BARs and unused resource slots ··· 63 61 else 64 62 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; 65 63 66 - if (resno < 6) { 67 - reg = PCI_BASE_ADDRESS_0 + 4 * resno; 68 - } else if (resno == PCI_ROM_RESOURCE) { 64 + reg = pci_resource_bar(dev, resno, &type); 65 + if (!reg) 66 + return; 67 + if (type != pci_bar_unknown) { 69 68 if (!(res->flags & IORESOURCE_ROM_ENABLE)) 70 69 return; 71 70 new |= PCI_ROM_ADDRESS_ENABLE; 72 - reg = dev->rom_base_reg; 73 - } else { 74 - /* Hmm, non-standard resource. */ 75 - 76 - return; /* kill uninitialised var warning */ 77 71 } 78 72 79 73 pci_write_config_dword(dev, reg, new); ··· 132 134 133 135 align = resource_alignment(res); 134 136 if (!align) { 135 - dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " 137 + dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " 136 138 "alignment) %pR flags %#lx\n", 137 139 resno, res, res->flags); 138 140 return -EINVAL; ··· 155 157 } 156 158 157 159 if (ret) { 158 - dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", 160 + dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", 159 161 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); 160 162 } else { 161 163 res->flags &= ~IORESOURCE_STARTALIGN; 162 164 if (resno < PCI_BRIDGE_RESOURCES) 163 - pci_update_resource(dev, res, resno); 165 + pci_update_resource(dev, resno); 164 166 } 165 167 166 168 return ret; ··· 195 197 dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", 196 198 resno, res->flags & IORESOURCE_IO ? 
"I/O" : "mem", res); 197 199 } else if (resno < PCI_BRIDGE_RESOURCES) { 198 - pci_update_resource(dev, res, resno); 200 + pci_update_resource(dev, resno); 199 201 } 200 202 201 203 return ret;
+8 -3
include/linux/ioport.h
··· 49 49 #define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ 50 50 #define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ 51 51 52 + #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ 52 53 #define IORESOURCE_DISABLED 0x10000000 53 54 #define IORESOURCE_UNSET 0x20000000 54 55 #define IORESOURCE_AUTO 0x40000000 ··· 134 133 } 135 134 136 135 /* Convenience shorthand with allocation */ 137 - #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) 138 - #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) 136 + #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) 137 + #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) 138 + #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) 139 + #define request_mem_region_exclusive(start,n,name) \ 140 + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) 139 141 #define rename_region(region, newname) do { (region)->name = (newname); } while (0) 140 142 141 143 extern struct resource * __request_region(struct resource *, 142 144 resource_size_t start, 143 - resource_size_t n, const char *name); 145 + resource_size_t n, const char *name, int relaxed); 144 146 145 147 /* Compatibility cruft */ 146 148 #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) ··· 179 175 extern void __devm_release_region(struct device *dev, struct resource *parent, 180 176 resource_size_t start, resource_size_t n); 181 177 extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); 178 + extern int iomem_is_exclusive(u64 addr); 182 179 183 180 #endif /* __ASSEMBLY__ */ 184 181 #endif /* _LINUX_IOPORT_H */
+12 -11
include/linux/pci-acpi.h
··· 8 8 #ifndef _PCI_ACPI_H_ 9 9 #define _PCI_ACPI_H_ 10 10 11 + #include <linux/acpi.h> 12 + 11 13 #define OSC_QUERY_TYPE 0 12 14 #define OSC_SUPPORT_TYPE 1 13 15 #define OSC_CONTROL_TYPE 2 ··· 50 48 51 49 #ifdef CONFIG_ACPI 52 50 extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags); 53 - extern acpi_status __pci_osc_support_set(u32 flags, const char *hid); 54 - static inline acpi_status pci_osc_support_set(u32 flags) 55 - { 56 - return __pci_osc_support_set(flags, PCI_ROOT_HID_STRING); 57 - } 58 - static inline acpi_status pcie_osc_support_set(u32 flags) 59 - { 60 - return __pci_osc_support_set(flags, PCI_EXPRESS_ROOT_HID_STRING); 61 - } 51 + int pci_acpi_osc_support(acpi_handle handle, u32 flags); 62 52 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 63 53 { 64 54 /* Find root host bridge */ ··· 60 66 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), 61 67 pdev->bus->number); 62 68 } 69 + 70 + static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) 71 + { 72 + int seg = pci_domain_nr(pbus), busnr = pbus->number; 73 + struct pci_dev *bridge = pbus->self; 74 + if (bridge) 75 + return DEVICE_ACPI_HANDLE(&(bridge->dev)); 76 + return acpi_get_pci_rootbridge_handle(seg, busnr); 77 + } 63 78 #else 64 79 #if !defined(AE_ERROR) 65 80 typedef u32 acpi_status; ··· 76 73 #endif 77 74 static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) 78 75 {return AE_ERROR;} 79 - static inline acpi_status pci_osc_support_set(u32 flags) {return AE_ERROR;} 80 - static inline acpi_status pcie_osc_support_set(u32 flags) {return AE_ERROR;} 81 76 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 82 77 { return NULL; } 83 78 #endif
+62 -28
include/linux/pci.h
··· 82 82 #define PCI_DMA_FROMDEVICE 2 83 83 #define PCI_DMA_NONE 3 84 84 85 - #define DEVICE_COUNT_RESOURCE 12 85 + /* 86 + * For PCI devices, the region numbers are assigned this way: 87 + */ 88 + enum { 89 + /* #0-5: standard PCI resources */ 90 + PCI_STD_RESOURCES, 91 + PCI_STD_RESOURCE_END = 5, 92 + 93 + /* #6: expansion ROM resource */ 94 + PCI_ROM_RESOURCE, 95 + 96 + /* resources assigned to buses behind the bridge */ 97 + #define PCI_BRIDGE_RESOURCE_NUM 4 98 + 99 + PCI_BRIDGE_RESOURCES, 100 + PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + 101 + PCI_BRIDGE_RESOURCE_NUM - 1, 102 + 103 + /* total resources associated with a PCI device */ 104 + PCI_NUM_RESOURCES, 105 + 106 + /* preserve this for compatibility */ 107 + DEVICE_COUNT_RESOURCE 108 + }; 86 109 87 110 typedef int __bitwise pci_power_t; 88 111 ··· 297 274 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); 298 275 } 299 276 300 - /* 301 - * For PCI devices, the region numbers are assigned this way: 302 - * 303 - * 0-5 standard PCI regions 304 - * 6 expansion ROM 305 - * 7-10 bridges: address space assigned to buses behind the bridge 306 - */ 307 - 308 - #define PCI_ROM_RESOURCE 6 309 - #define PCI_BRIDGE_RESOURCES 7 310 - #define PCI_NUM_RESOURCES 11 311 - 312 277 #ifndef PCI_BUS_NUM_RESOURCES 313 278 #define PCI_BUS_NUM_RESOURCES 16 314 279 #endif ··· 335 324 336 325 #define pci_bus_b(n) list_entry(n, struct pci_bus, node) 337 326 #define to_pci_bus(n) container_of(n, struct pci_bus, dev) 327 + 328 + #ifdef CONFIG_PCI_MSI 329 + static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) 330 + { 331 + return pci_dev->msi_enabled || pci_dev->msix_enabled; 332 + } 333 + #else 334 + static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } 335 + #endif 338 336 339 337 /* 340 338 * Error values that may be returned by PCI functions. 
··· 552 532 void pci_read_bridge_bases(struct pci_bus *child); 553 533 struct resource *pci_find_parent_resource(const struct pci_dev *dev, 554 534 struct resource *res); 535 + u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin); 555 536 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); 537 + u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); 556 538 extern struct pci_dev *pci_dev_get(struct pci_dev *dev); 557 539 extern void pci_dev_put(struct pci_dev *dev); 558 540 extern void pci_remove_bus(struct pci_bus *b); ··· 651 629 652 630 void pci_disable_device(struct pci_dev *dev); 653 631 void pci_set_master(struct pci_dev *dev); 632 + void pci_clear_master(struct pci_dev *dev); 654 633 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); 655 634 #define HAVE_PCI_SET_MWI 656 635 int __must_check pci_set_mwi(struct pci_dev *dev); ··· 670 647 int pcie_set_readrq(struct pci_dev *dev, int rq); 671 648 int pci_reset_function(struct pci_dev *dev); 672 649 int pci_execute_reset_function(struct pci_dev *dev); 673 - void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); 650 + void pci_update_resource(struct pci_dev *dev, int resno); 674 651 int __must_check pci_assign_resource(struct pci_dev *dev, int i); 675 652 int pci_select_bars(struct pci_dev *dev, unsigned long flags); 676 653 ··· 697 674 /* Functions for PCI Hotplug drivers to use */ 698 675 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); 699 676 677 + /* Vital product data routines */ 678 + ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 679 + ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 680 + int pci_vpd_truncate(struct pci_dev *dev, size_t size); 681 + 700 682 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 701 683 void pci_bus_assign_resources(struct pci_bus *bus); 702 684 void pci_bus_size_bridges(struct 
pci_bus *bus); ··· 714 686 int (*)(struct pci_dev *, u8, u8)); 715 687 #define HAVE_PCI_REQ_REGIONS 2 716 688 int __must_check pci_request_regions(struct pci_dev *, const char *); 689 + int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); 717 690 void pci_release_regions(struct pci_dev *); 718 691 int __must_check pci_request_region(struct pci_dev *, int, const char *); 692 + int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); 719 693 void pci_release_region(struct pci_dev *, int); 720 694 int pci_request_selected_regions(struct pci_dev *, int, const char *); 695 + int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); 721 696 void pci_release_selected_regions(struct pci_dev *, int); 722 697 723 698 /* drivers/pci/bus.c */ ··· 810 779 811 780 static inline void pci_restore_msi_state(struct pci_dev *dev) 812 781 { } 782 + static inline int pci_msi_enabled(void) 783 + { 784 + return 0; 785 + } 813 786 #else 814 787 extern int pci_enable_msi(struct pci_dev *dev); 815 788 extern void pci_msi_shutdown(struct pci_dev *dev); ··· 824 789 extern void pci_disable_msix(struct pci_dev *dev); 825 790 extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); 826 791 extern void pci_restore_msi_state(struct pci_dev *dev); 792 + extern int pci_msi_enabled(void); 793 + #endif 794 + 795 + #ifndef CONFIG_PCIEASPM 796 + static inline int pcie_aspm_enabled(void) 797 + { 798 + return 0; 799 + } 800 + #else 801 + extern int pcie_aspm_enabled(void); 827 802 #endif 828 803 829 804 #ifdef CONFIG_HT_IRQ ··· 1185 1140 static inline void pci_mmcfg_late_init(void) { } 1186 1141 #endif 1187 1142 1188 - #ifdef CONFIG_HAS_IOMEM 1189 - static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) 1190 - { 1191 - /* 1192 - * Make sure the BAR is actually a memory resource, not an IO resource 1193 - */ 1194 - if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 1195 - WARN_ON(1); 1196 - return 
NULL; 1197 - } 1198 - return ioremap_nocache(pci_resource_start(pdev, bar), 1199 - pci_resource_len(pdev, bar)); 1200 - } 1201 - #endif 1143 + int pci_ext_cfg_avail(struct pci_dev *dev); 1144 + 1145 + void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 1202 1146 1203 1147 #endif /* __KERNEL__ */ 1204 1148 #endif /* LINUX_PCI_H */
+2
include/linux/pci_hotplug.h
··· 228 228 struct hotplug_params *hpp); 229 229 int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); 230 230 int acpi_root_bridge(acpi_handle handle); 231 + int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); 232 + int acpi_pci_detect_ejectable(struct pci_bus *pbus); 231 233 #endif 232 234 #endif 233 235
+69 -7
include/linux/pci_regs.h
··· 210 210 #define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */ 211 211 #define PCI_CAP_ID_EXP 0x10 /* PCI Express */ 212 212 #define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ 213 + #define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */ 213 214 #define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ 214 215 #define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ 215 216 #define PCI_CAP_SIZEOF 4 ··· 317 316 #define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */ 318 317 #define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */ 319 318 319 + /* PCI Advanced Feature registers */ 320 + 321 + #define PCI_AF_LENGTH 2 322 + #define PCI_AF_CAP 3 323 + #define PCI_AF_CAP_TP 0x01 324 + #define PCI_AF_CAP_FLR 0x02 325 + #define PCI_AF_CTRL 4 326 + #define PCI_AF_CTRL_FLR 0x01 327 + #define PCI_AF_STATUS 5 328 + #define PCI_AF_STATUS_TP 0x01 329 + 320 330 /* PCI-X registers */ 321 331 322 332 #define PCI_X_CMD 2 /* Modes & Features */ ··· 411 399 #define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */ 412 400 #define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */ 413 401 #define PCI_EXP_LNKCAP 12 /* Link Capabilities */ 414 - #define PCI_EXP_LNKCAP_ASPMS 0xc00 /* ASPM Support */ 415 - #define PCI_EXP_LNKCAP_L0SEL 0x7000 /* L0s Exit Latency */ 416 - #define PCI_EXP_LNKCAP_L1EL 0x38000 /* L1 Exit Latency */ 417 - #define PCI_EXP_LNKCAP_CLKPM 0x40000 /* L1 Clock Power Management */ 402 + #define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ 403 + #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ 404 + #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ 405 + #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ 406 + #define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */ 407 + #define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* L1 Clock Power Management */ 408 + #define PCI_EXP_LNKCAP_SDERC 0x00080000 /* Suprise Down Error Reporting Capable */ 409 + #define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link 
Active Reporting Capable */ 410 + #define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */ 411 + #define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */ 418 412 #define PCI_EXP_LNKCTL 16 /* Link Control */ 419 - #define PCI_EXP_LNKCTL_RL 0x20 /* Retrain Link */ 420 - #define PCI_EXP_LNKCTL_CCC 0x40 /* Common Clock COnfiguration */ 413 + #define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ 414 + #define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */ 415 + #define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */ 416 + #define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */ 417 + #define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */ 418 + #define PCI_EXP_LNKCTL_ES 0x0080 /* Extended Synch */ 421 419 #define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */ 420 + #define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */ 421 + #define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */ 422 + #define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */ 422 423 #define PCI_EXP_LNKSTA 18 /* Link Status */ 423 - #define PCI_EXP_LNKSTA_LT 0x800 /* Link Training */ 424 + #define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ 425 + #define PCI_EXP_LNKSTA_NLW 0x03f0 /* Nogotiated Link Width */ 426 + #define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */ 424 427 #define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */ 428 + #define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */ 429 + #define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */ 430 + #define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */ 425 431 #define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ 432 + #define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */ 433 + #define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */ 434 + #define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */ 435 + #define PCI_EXP_SLTCAP_AIP 
0x00000008 /* Attention Indicator Present */ 436 + #define PCI_EXP_SLTCAP_PIP 0x00000010 /* Power Indicator Present */ 437 + #define PCI_EXP_SLTCAP_HPS 0x00000020 /* Hot-Plug Surprise */ 438 + #define PCI_EXP_SLTCAP_HPC 0x00000040 /* Hot-Plug Capable */ 439 + #define PCI_EXP_SLTCAP_SPLV 0x00007f80 /* Slot Power Limit Value */ 440 + #define PCI_EXP_SLTCAP_SPLS 0x00018000 /* Slot Power Limit Scale */ 441 + #define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */ 442 + #define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */ 443 + #define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */ 426 444 #define PCI_EXP_SLTCTL 24 /* Slot Control */ 445 + #define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */ 446 + #define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */ 447 + #define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */ 448 + #define PCI_EXP_SLTCTL_PDCE 0x0008 /* Presence Detect Changed Enable */ 449 + #define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ 450 + #define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ 451 + #define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ 452 + #define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ 453 + #define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ 454 + #define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ 455 + #define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ 427 456 #define PCI_EXP_SLTSTA 26 /* Slot Status */ 457 + #define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */ 458 + #define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */ 459 + #define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */ 460 + #define PCI_EXP_SLTSTA_PDC 0x0008 /* Presence Detect Changed */ 461 + #define PCI_EXP_SLTSTA_CC 0x0010 /* Command Completed */ 462 + #define PCI_EXP_SLTSTA_MRLSS 0x0020 /* MRL Sensor State */ 463 + 
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ 464 + #define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */ 465 + #define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */ 428 466 #define PCI_EXP_RTCTL 28 /* Root Control */ 429 467 #define PCI_EXP_RTCTL_SECEE 0x01 /* System Error on Correctable Error */ 430 468 #define PCI_EXP_RTCTL_SENFEE 0x02 /* System Error on Non-Fatal Error */
+58 -3
kernel/resource.c
··· 623 623 */ 624 624 struct resource * __request_region(struct resource *parent, 625 625 resource_size_t start, resource_size_t n, 626 - const char *name) 626 + const char *name, int flags) 627 627 { 628 628 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); 629 629 ··· 634 634 res->start = start; 635 635 res->end = start + n - 1; 636 636 res->flags = IORESOURCE_BUSY; 637 + res->flags |= flags; 637 638 638 639 write_lock(&resource_lock); 639 640 ··· 680 679 { 681 680 struct resource * res; 682 681 683 - res = __request_region(parent, start, n, "check-region"); 682 + res = __request_region(parent, start, n, "check-region", 0); 684 683 if (!res) 685 684 return -EBUSY; 686 685 ··· 777 776 dr->start = start; 778 777 dr->n = n; 779 778 780 - res = __request_region(parent, start, n, name); 779 + res = __request_region(parent, start, n, name, 0); 781 780 if (res) 782 781 devres_add(dev, dr); 783 782 else ··· 877 876 878 877 return err; 879 878 } 879 + 880 + #ifdef CONFIG_STRICT_DEVMEM 881 + static int strict_iomem_checks = 1; 882 + #else 883 + static int strict_iomem_checks; 884 + #endif 885 + 886 + /* 887 + * check if an address is reserved in the iomem resource tree 888 + * returns 1 if reserved, 0 if not reserved. 889 + */ 890 + int iomem_is_exclusive(u64 addr) 891 + { 892 + struct resource *p = &iomem_resource; 893 + int err = 0; 894 + loff_t l; 895 + int size = PAGE_SIZE; 896 + 897 + if (!strict_iomem_checks) 898 + return 0; 899 + 900 + addr = addr & PAGE_MASK; 901 + 902 + read_lock(&resource_lock); 903 + for (p = p->child; p ; p = r_next(NULL, p, &l)) { 904 + /* 905 + * We can probably skip the resources without 906 + * IORESOURCE_IO attribute? 
907 + */ 908 + if (p->start >= addr + size) 909 + break; 910 + if (p->end < addr) 911 + continue; 912 + if (p->flags & IORESOURCE_BUSY && 913 + p->flags & IORESOURCE_EXCLUSIVE) { 914 + err = 1; 915 + break; 916 + } 917 + } 918 + read_unlock(&resource_lock); 919 + 920 + return err; 921 + } 922 + 923 + static int __init strict_iomem(char *str) 924 + { 925 + if (strstr(str, "relaxed")) 926 + strict_iomem_checks = 0; 927 + if (strstr(str, "strict")) 928 + strict_iomem_checks = 1; 929 + return 1; 930 + } 931 + 932 + __setup("iomem=", strict_iomem);