Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (48 commits)
x86/PCI: Prevent mmconfig memory corruption
ACPI: Use GPE reference counting to support shared GPEs
x86/PCI: use host bridge _CRS info by default on 2008 and newer machines
PCI: augment bus resource table with a list
PCI: add pci_bus_for_each_resource(), remove direct bus->resource[] refs
PCI: read bridge windows before filling in subtractive decode resources
PCI: split up pci_read_bridge_bases()
PCIe PME: use pci_pcie_cap()
PCI PM: Run-time callbacks for PCI bus type
PCIe PME: use pci_is_pcie()
PCI / ACPI / PM: Platform support for PCI PME wake-up
ACPI / ACPICA: Multiple system notify handlers per device
ACPI / PM: Add more run-time wake-up fields
ACPI: Use GPE reference counting to support shared GPEs
PCI PM: Make it possible to force using INTx for PCIe PME signaling
PCI PM: PCIe PME root port service driver
PCI PM: Add function for checking PME status of devices
PCI: mark is_pcie obsolete
PCI: set PCI_PREF_RANGE_TYPE_64 in pci_bridge_check_ranges
PCI: pciehp: second try to get big range for pcie devices
...

+3039 -1392
+14 -2
Documentation/kernel-parameters.txt
··· 1948 1948 IRQ routing is enabled. 1949 1949 noacpi [X86] Do not use ACPI for IRQ routing 1950 1950 or for PCI scanning. 1951 - use_crs [X86] Use _CRS for PCI resource 1952 - allocation. 1951 + use_crs [X86] Use PCI host bridge window information 1952 + from ACPI. On BIOSes from 2008 or later, this 1953 + is enabled by default. If you need to use this, 1954 + please report a bug. 1955 + nocrs [X86] Ignore PCI host bridge windows from ACPI. 1956 + If you need to use this, please report a bug. 1953 1957 routeirq Do IRQ routing for all PCI devices. 1954 1958 This is normally done in pci_enable_device(), 1955 1959 so this option is a temporary workaround ··· 2001 1997 off Disable ASPM. 2002 1998 force Enable ASPM even on devices that claim not to support it. 2003 1999 WARNING: Forcing ASPM on may cause system lockups. 2000 + 2001 + pcie_pme= [PCIE,PM] Native PCIe PME signaling options: 2002 + off Do not use native PCIe PME signaling. 2003 + force Use native PCIe PME signaling even if the BIOS refuses 2004 + to allow the kernel to control the relevant PCIe config 2005 + registers. 2006 + nomsi Do not use MSI for native PCIe PME signaling (this makes 2007 + all PCIe root ports use INTx for everything). 2004 2008 2005 2009 pcmv= [HW,PCMCIA] BadgePAD 4 2006 2010
+3 -3
arch/alpha/kernel/pci.c
··· 126 126 #define MB (1024*KB) 127 127 #define GB (1024*MB) 128 128 129 - void 130 - pcibios_align_resource(void *data, struct resource *res, 129 + resource_size_t 130 + pcibios_align_resource(void *data, const struct resource *res, 131 131 resource_size_t size, resource_size_t align) 132 132 { 133 133 struct pci_dev *dev = data; ··· 184 184 } 185 185 } 186 186 187 - res->start = start; 187 + return start; 188 188 } 189 189 #undef KB 190 190 #undef MB
+5 -3
arch/arm/kernel/bios32.c
··· 616 616 * but we want to try to avoid allocating at 0x2900-0x2bff 617 617 * which might be mirrored at 0x0100-0x03ff.. 618 618 */ 619 - void pcibios_align_resource(void *data, struct resource *res, 620 - resource_size_t size, resource_size_t align) 619 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 620 + resource_size_t size, resource_size_t align) 621 621 { 622 622 resource_size_t start = res->start; 623 623 624 624 if (res->flags & IORESOURCE_IO && start & 0x300) 625 625 start = (start + 0x3ff) & ~0x3ff; 626 626 627 - res->start = (start + align - 1) & ~(align - 1); 627 + start = (start + align - 1) & ~(align - 1); 628 + 629 + return start; 628 630 } 629 631 630 632 /**
+7 -9
arch/cris/arch-v32/drivers/pci/bios.c
··· 41 41 return 0; 42 42 } 43 43 44 - void 45 - pcibios_align_resource(void *data, struct resource *res, 44 + resource_size_t 45 + pcibios_align_resource(void *data, const struct resource *res, 46 46 resource_size_t size, resource_size_t align) 47 47 { 48 - if (res->flags & IORESOURCE_IO) { 49 - resource_size_t start = res->start; 48 + resource_size_t start = res->start; 50 49 51 - if (start & 0x300) { 52 - start = (start + 0x3ff) & ~0x3ff; 53 - res->start = start; 54 - } 55 - } 50 + if ((res->flags & IORESOURCE_IO) && (start & 0x300)) 51 + start = (start + 0x3ff) & ~0x3ff; 52 + 53 + return start; 56 54 } 57 55 58 56 int pcibios_enable_resources(struct pci_dev *dev, int mask)
+7 -9
arch/frv/mb93090-mb00/pci-frv.c
··· 32 32 * but we want to try to avoid allocating at 0x2900-0x2bff 33 33 * which might have be mirrored at 0x0100-0x03ff.. 34 34 */ 35 - void 36 - pcibios_align_resource(void *data, struct resource *res, 35 + resource_size_t 36 + pcibios_align_resource(void *data, const struct resource *res, 37 37 resource_size_t size, resource_size_t align) 38 38 { 39 - if (res->flags & IORESOURCE_IO) { 40 - resource_size_t start = res->start; 39 + resource_size_t start = res->start; 41 40 42 - if (start & 0x300) { 43 - start = (start + 0x3ff) & ~0x3ff; 44 - res->start = start; 45 - } 46 - } 41 + if ((res->flags & IORESOURCE_IO) && (start & 0x300)) 42 + start = (start + 0x3ff) & ~0x3ff; 43 + 44 + return start; 47 45 } 48 46
+1
arch/ia64/include/asm/acpi.h
··· 98 98 #endif 99 99 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ 100 100 static inline void disable_acpi(void) { } 101 + static inline void pci_acpi_crs_quirks(void) { } 101 102 102 103 const char *acpi_get_sysname (void); 103 104 int acpi_request_vector (u32 int_type);
+8 -14
arch/ia64/pci/pci.c
··· 320 320 static void __devinit 321 321 pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) 322 322 { 323 - int i, j; 323 + int i; 324 324 325 - j = 0; 325 + pci_bus_remove_resources(bus); 326 326 for (i = 0; i < ctrl->windows; i++) { 327 327 struct resource *res = &ctrl->window[i].resource; 328 328 /* HP's firmware has a hack to work around a Windows bug. ··· 330 330 if ((res->flags & IORESOURCE_MEM) && 331 331 (res->end - res->start < 16)) 332 332 continue; 333 - if (j >= PCI_BUS_NUM_RESOURCES) { 334 - dev_warn(&bus->dev, 335 - "ignoring host bridge window %pR (no space)\n", 336 - res); 337 - continue; 338 - } 339 - bus->resource[j++] = res; 333 + pci_bus_add_resource(bus, res, 0); 340 334 } 341 335 } 342 336 ··· 446 452 static int __devinit is_valid_resource(struct pci_dev *dev, int idx) 447 453 { 448 454 unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM; 449 - struct resource *devr = &dev->resource[idx]; 455 + struct resource *devr = &dev->resource[idx], *busr; 450 456 451 457 if (!dev->bus) 452 458 return 0; 453 - for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) { 454 - struct resource *busr = dev->bus->resource[i]; 455 459 460 + pci_bus_for_each_resource(dev->bus, busr, i) { 456 461 if (!busr || ((busr->flags ^ devr->flags) & type_mask)) 457 462 continue; 458 463 if ((devr->start) && (devr->start >= busr->start) && ··· 540 547 acpi_pci_irq_disable(dev); 541 548 } 542 549 543 - void 544 - pcibios_align_resource (void *data, struct resource *res, 550 + resource_size_t 551 + pcibios_align_resource (void *data, const struct resource *res, 545 552 resource_size_t size, resource_size_t align) 546 553 { 554 + return res->start; 547 555 } 548 556 549 557 /*
+3 -3
arch/mips/pci/pci.c
··· 49 49 * but we want to try to avoid allocating at 0x2900-0x2bff 50 50 * which might have be mirrored at 0x0100-0x03ff.. 51 51 */ 52 - void 53 - pcibios_align_resource(void *data, struct resource *res, 52 + resource_size_t 53 + pcibios_align_resource(void *data, const struct resource *res, 54 54 resource_size_t size, resource_size_t align) 55 55 { 56 56 struct pci_dev *dev = data; ··· 73 73 start = PCIBIOS_MIN_MEM + hose->mem_resource->start; 74 74 } 75 75 76 - res->start = start; 76 + return start; 77 77 } 78 78 79 79 static void __devinit pcibios_scanbus(struct pci_controller *hose)
+5 -5
arch/mips/pmc-sierra/yosemite/ht.c
··· 345 345 return pcibios_enable_resources(dev); 346 346 } 347 347 348 - void pcibios_align_resource(void *data, struct resource *res, 349 - resource_size_t size, resource_size_t align) 348 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 349 + resource_size_t size, resource_size_t align) 350 350 { 351 351 struct pci_dev *dev = data; 352 + resource_size_t start = res->start; 352 353 353 354 if (res->flags & IORESOURCE_IO) { 354 - resource_size_t start = res->start; 355 - 356 355 /* We need to avoid collisions with `mirrored' VGA ports 357 356 and other strange ISA hardware, so we always want the 358 357 addresses kilobyte aligned. */ ··· 362 363 } 363 364 364 365 start = (start + 1024 - 1) & ~(1024 - 1); 365 - res->start = start; 366 366 } 367 + 368 + return start; 367 369 } 368 370 369 371 struct pci_ops titan_pci_ops = {
+7 -9
arch/mn10300/unit-asb2305/pci-asb2305.c
··· 31 31 * but we want to try to avoid allocating at 0x2900-0x2bff 32 32 * which might have be mirrored at 0x0100-0x03ff.. 33 33 */ 34 - void pcibios_align_resource(void *data, struct resource *res, 35 - resource_size_t size, resource_size_t align) 34 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 35 + resource_size_t size, resource_size_t align) 36 36 { 37 + resource_size_t start = res->start; 38 + 37 39 #if 0 38 40 struct pci_dev *dev = data; 39 41 ··· 49 47 ); 50 48 #endif 51 49 52 - if (res->flags & IORESOURCE_IO) { 53 - unsigned long start = res->start; 50 + if ((res->flags & IORESOURCE_IO) && (start & 0x300)) 51 + start = (start + 0x3ff) & ~0x3ff; 54 52 55 - if (start & 0x300) { 56 - start = (start + 0x3ff) & ~0x3ff; 57 - res->start = start; 58 - } 59 - } 53 + return start; 60 54 } 61 55 62 56
+2 -4
arch/mn10300/unit-asb2305/pci.c
··· 331 331 static int __devinit is_valid_resource(struct pci_dev *dev, int idx) 332 332 { 333 333 unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM; 334 - struct resource *devr = &dev->resource[idx]; 334 + struct resource *devr = &dev->resource[idx], *busr; 335 335 336 336 if (dev->bus) { 337 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 338 - struct resource *busr = dev->bus->resource[i]; 339 - 337 + pci_bus_for_each_resource(dev->bus, busr, i) { 340 338 if (!busr || (busr->flags ^ devr->flags) & type_mask) 341 339 continue; 342 340
+5 -5
arch/parisc/kernel/pci.c
··· 257 257 * Since we are just checking candidates, don't use any fields other 258 258 * than res->start. 259 259 */ 260 - void pcibios_align_resource(void *data, struct resource *res, 260 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 261 261 resource_size_t size, resource_size_t alignment) 262 262 { 263 - resource_size_t mask, align; 263 + resource_size_t mask, align, start = res->start; 264 264 265 265 DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n", 266 266 pci_name(((struct pci_dev *) data)), ··· 272 272 273 273 /* Align to largest of MIN or input size */ 274 274 mask = max(alignment, align) - 1; 275 - res->start += mask; 276 - res->start &= ~mask; 275 + start += mask; 276 + start &= ~mask; 277 277 278 - /* The caller updates the end field, we don't. */ 278 + return start; 279 279 } 280 280 281 281
+10 -14
arch/powerpc/kernel/pci-common.c
··· 1047 1047 1048 1048 struct pci_dev *dev = bus->self; 1049 1049 1050 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { 1051 - if ((res = bus->resource[i]) == NULL) 1052 - continue; 1053 - if (!res->flags) 1050 + pci_bus_for_each_resource(bus, res, i) { 1051 + if (!res || !res->flags) 1054 1052 continue; 1055 1053 if (i >= 3 && bus->self->transparent) 1056 1054 continue; ··· 1179 1181 * but we want to try to avoid allocating at 0x2900-0x2bff 1180 1182 * which might have be mirrored at 0x0100-0x03ff.. 1181 1183 */ 1182 - void pcibios_align_resource(void *data, struct resource *res, 1184 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 1183 1185 resource_size_t size, resource_size_t align) 1184 1186 { 1185 1187 struct pci_dev *dev = data; 1188 + resource_size_t start = res->start; 1186 1189 1187 1190 if (res->flags & IORESOURCE_IO) { 1188 - resource_size_t start = res->start; 1189 - 1190 1191 if (skip_isa_ioresource_align(dev)) 1191 - return; 1192 - if (start & 0x300) { 1192 + return start; 1193 + if (start & 0x300) 1193 1194 start = (start + 0x3ff) & ~0x3ff; 1194 - res->start = start; 1195 - } 1196 1195 } 1196 + 1197 + return start; 1197 1198 } 1198 1199 EXPORT_SYMBOL(pcibios_align_resource); 1199 1200 ··· 1275 1278 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", 1276 1279 pci_domain_nr(bus), bus->number); 1277 1280 1278 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { 1279 - if ((res = bus->resource[i]) == NULL || !res->flags 1280 - || res->start > res->end || res->parent) 1281 + pci_bus_for_each_resource(bus, res, i) { 1282 + if (!res || !res->flags || res->start > res->end || res->parent) 1281 1283 continue; 1282 1284 if (bus->parent == NULL) 1283 1285 pr = (res->flags & IORESOURCE_IO) ?
+6 -6
arch/powerpc/platforms/fsl_uli1575.c
··· 222 222 int i; 223 223 u8 *dummy; 224 224 struct pci_bus *bus = dev->bus; 225 + struct resource *res; 225 226 resource_size_t end = 0; 226 227 227 228 for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) { ··· 231 230 end = pci_resource_end(dev, i); 232 231 } 233 232 234 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 235 - if ((bus->resource[i]) && 236 - (bus->resource[i]->flags & IORESOURCE_MEM)) { 237 - if (bus->resource[i]->end == end) 238 - dummy = ioremap(bus->resource[i]->start, 0x4); 233 + pci_bus_for_each_resource(bus, res, i) { 234 + if (res && res->flags & IORESOURCE_MEM) { 235 + if (res->end == end) 236 + dummy = ioremap(res->start, 0x4); 239 237 else 240 - dummy = ioremap(bus->resource[i]->end - 3, 0x4); 238 + dummy = ioremap(res->end - 3, 0x4); 241 239 if (dummy) { 242 240 in_8(dummy); 243 241 iounmap(dummy);
+3 -3
arch/sh/drivers/pci/pci.c
··· 148 148 * addresses to be allocated in the 0x000-0x0ff region 149 149 * modulo 0x400. 150 150 */ 151 - void pcibios_align_resource(void *data, struct resource *res, 152 - resource_size_t size, resource_size_t align) 151 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 152 + resource_size_t size, resource_size_t align) 153 153 { 154 154 struct pci_dev *dev = data; 155 155 struct pci_channel *chan = dev->sysdata; ··· 171 171 start = PCIBIOS_MIN_MEM + chan->mem_resource->start; 172 172 } 173 173 174 - res->start = start; 174 + return start; 175 175 } 176 176 177 177 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+3 -2
arch/sparc/kernel/pci.c
··· 722 722 { 723 723 } 724 724 725 - void pcibios_align_resource(void *data, struct resource *res, 726 - resource_size_t size, resource_size_t align) 725 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 726 + resource_size_t size, resource_size_t align) 727 727 { 728 + return res->start; 728 729 } 729 730 730 731 int pcibios_enable_device(struct pci_dev *dev, int mask)
+3 -2
arch/sparc/kernel/pcic.c
··· 768 768 return str; 769 769 } 770 770 771 - void pcibios_align_resource(void *data, struct resource *res, 772 - resource_size_t size, resource_size_t align) 771 + resource_size_t pcibios_align_resource(void *data, const struct resource *res, 772 + resource_size_t size, resource_size_t align) 773 773 { 774 + return res->start; 774 775 } 775 776 776 777 int pcibios_enable_device(struct pci_dev *pdev, int mask)
+1
arch/x86/include/asm/pci_x86.h
··· 29 29 #define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 30 30 #define PCI_HAS_IO_ECS 0x40000 31 31 #define PCI_NOASSIGN_ROMS 0x80000 32 + #define PCI_ROOT_NO_CRS 0x100000 32 33 33 34 extern unsigned int pci_probe; 34 35 extern unsigned long pirq_table_addr;
+49 -33
arch/x86/pci/acpi.c
··· 15 15 int busnum; 16 16 }; 17 17 18 + static bool pci_use_crs = true; 19 + 20 + static int __init set_use_crs(const struct dmi_system_id *id) 21 + { 22 + pci_use_crs = true; 23 + return 0; 24 + } 25 + 26 + static const struct dmi_system_id pci_use_crs_table[] __initconst = { 27 + /* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */ 28 + { 29 + .callback = set_use_crs, 30 + .ident = "IBM System x3800", 31 + .matches = { 32 + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), 33 + DMI_MATCH(DMI_PRODUCT_NAME, "x3800"), 34 + }, 35 + }, 36 + {} 37 + }; 38 + 39 + void __init pci_acpi_crs_quirks(void) 40 + { 41 + int year; 42 + 43 + if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) 44 + pci_use_crs = false; 45 + 46 + dmi_check_system(pci_use_crs_table); 47 + 48 + /* 49 + * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that 50 + * takes precedence over anything we figured out above. 51 + */ 52 + if (pci_probe & PCI_ROOT_NO_CRS) 53 + pci_use_crs = false; 54 + else if (pci_probe & PCI_USE__CRS) 55 + pci_use_crs = true; 56 + 57 + printk(KERN_INFO "PCI: %s host bridge windows from ACPI; " 58 + "if necessary, use \"pci=%s\" and report a bug\n", 59 + pci_use_crs ? "Using" : "Ignoring", 60 + pci_use_crs ? 
"nocrs" : "use_crs"); 61 + } 62 + 18 63 static acpi_status 19 64 resource_to_addr(struct acpi_resource *resource, 20 65 struct acpi_resource_address64 *addr) ··· 88 43 if (ACPI_SUCCESS(status)) 89 44 info->res_num++; 90 45 return AE_OK; 91 - } 92 - 93 - static int 94 - bus_has_transparent_bridge(struct pci_bus *bus) 95 - { 96 - struct pci_dev *dev; 97 - 98 - list_for_each_entry(dev, &bus->devices, bus_list) { 99 - u16 class = dev->class >> 8; 100 - 101 - if (class == PCI_CLASS_BRIDGE_PCI && dev->transparent) 102 - return true; 103 - } 104 - return false; 105 46 } 106 47 107 48 static void ··· 123 92 acpi_status status; 124 93 unsigned long flags; 125 94 struct resource *root; 126 - int max_root_bus_resources = PCI_BUS_NUM_RESOURCES; 127 95 u64 start, end; 128 - 129 - if (bus_has_transparent_bridge(info->bus)) 130 - max_root_bus_resources -= 3; 131 96 132 97 status = resource_to_addr(acpi_res, &addr); 133 98 if (!ACPI_SUCCESS(status)) ··· 142 115 143 116 start = addr.minimum + addr.translation_offset; 144 117 end = start + addr.address_length - 1; 145 - if (info->res_num >= max_root_bus_resources) { 146 - if (pci_probe & PCI_USE__CRS) 147 - printk(KERN_WARNING "PCI: Failed to allocate " 148 - "0x%lx-0x%lx from %s for %s due to _CRS " 149 - "returning more than %d resource descriptors\n", 150 - (unsigned long) start, (unsigned long) end, 151 - root->name, info->name, max_root_bus_resources); 152 - return AE_OK; 153 - } 154 118 155 119 res = &info->res[info->res_num]; 156 120 res->name = info->name; ··· 151 133 res->child = NULL; 152 134 align_resource(info->bridge, res); 153 135 154 - if (!(pci_probe & PCI_USE__CRS)) { 136 + if (!pci_use_crs) { 155 137 dev_printk(KERN_DEBUG, &info->bridge->dev, 156 138 "host bridge window %pR (ignored)\n", res); 157 139 return AE_OK; ··· 161 143 dev_err(&info->bridge->dev, 162 144 "can't allocate host bridge window %pR\n", res); 163 145 } else { 164 - info->bus->resource[info->res_num] = res; 146 + pci_bus_add_resource(info->bus, 
res, 0); 165 147 info->res_num++; 166 148 if (addr.translation_offset) 167 149 dev_info(&info->bridge->dev, "host bridge window %pR " ··· 182 164 struct pci_root_info info; 183 165 size_t size; 184 166 185 - if (!(pci_probe & PCI_USE__CRS)) 186 - dev_info(&device->dev, 187 - "ignoring host bridge windows from ACPI; " 188 - "boot with \"pci=use_crs\" to use them\n"); 167 + if (pci_use_crs) 168 + pci_bus_remove_resources(bus); 189 169 190 170 info.bridge = device; 191 171 info.bus = bus;
+2 -1
arch/x86/pci/bus_numa.c
··· 36 36 printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n", 37 37 b->number); 38 38 39 + pci_bus_remove_resources(b); 39 40 info = &pci_root_info[i]; 40 41 for (j = 0; j < info->res_num; j++) { 41 42 struct resource *res; 42 43 struct resource *root; 43 44 44 45 res = &info->res[j]; 45 - b->resource[j] = res; 46 + pci_bus_add_resource(b, res, 0); 46 47 if (res->flags & IORESOURCE_IO) 47 48 root = &ioport_resource; 48 49 else
+1 -2
arch/x86/pci/bus_numa.h
··· 2 2 3 3 /* 4 4 * sub bus (transparent) will use entres from 3 to store extra from 5 - * root, so need to make sure we have enough slot there, Should we 6 - * increase PCI_BUS_NUM_RESOURCES? 5 + * root, so need to make sure we have enough slot there. 7 6 */ 8 7 #define RES_NUM 16 9 8 struct pci_root_info {
+3
arch/x86/pci/common.c
··· 520 520 } else if (!strcmp(str, "use_crs")) { 521 521 pci_probe |= PCI_USE__CRS; 522 522 return NULL; 523 + } else if (!strcmp(str, "nocrs")) { 524 + pci_probe |= PCI_ROOT_NO_CRS; 525 + return NULL; 523 526 } else if (!strcmp(str, "earlydump")) { 524 527 pci_early_dump_regs = 1; 525 528 return NULL;
+6 -8
arch/x86/pci/i386.c
··· 60 60 * but we want to try to avoid allocating at 0x2900-0x2bff 61 61 * which might have be mirrored at 0x0100-0x03ff.. 62 62 */ 63 - void 64 - pcibios_align_resource(void *data, struct resource *res, 63 + resource_size_t 64 + pcibios_align_resource(void *data, const struct resource *res, 65 65 resource_size_t size, resource_size_t align) 66 66 { 67 67 struct pci_dev *dev = data; 68 + resource_size_t start = res->start; 68 69 69 70 if (res->flags & IORESOURCE_IO) { 70 - resource_size_t start = res->start; 71 - 72 71 if (skip_isa_ioresource_align(dev)) 73 - return; 74 - if (start & 0x300) { 72 + return start; 73 + if (start & 0x300) 75 74 start = (start + 0x3ff) & ~0x3ff; 76 - res->start = start; 77 - } 78 75 } 76 + return start; 79 77 } 80 78 EXPORT_SYMBOL(pcibios_align_resource); 81 79
+2
arch/x86/pci/irq.c
··· 590 590 case PCI_DEVICE_ID_INTEL_ICH10_1: 591 591 case PCI_DEVICE_ID_INTEL_ICH10_2: 592 592 case PCI_DEVICE_ID_INTEL_ICH10_3: 593 + case PCI_DEVICE_ID_INTEL_CPT_LPC1: 594 + case PCI_DEVICE_ID_INTEL_CPT_LPC2: 593 595 r->name = "PIIX/ICH"; 594 596 r->get = pirq_piix_get; 595 597 r->set = pirq_piix_set;
+6 -11
arch/x86/pci/mmconfig-shared.c
··· 303 303 { 304 304 struct pci_mmcfg_region *cfg, *cfgx; 305 305 306 - /* last one*/ 307 - cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list); 308 - if (cfg) 309 - if (cfg->end_bus < cfg->start_bus) 310 - cfg->end_bus = 255; 311 - 312 - if (list_is_singular(&pci_mmcfg_list)) 313 - return; 314 - 315 - /* don't overlap please */ 306 + /* Fixup overlaps */ 316 307 list_for_each_entry(cfg, &pci_mmcfg_list, list) { 317 308 if (cfg->end_bus < cfg->start_bus) 318 309 cfg->end_bus = 255; 319 310 311 + /* Don't access the list head ! */ 312 + if (cfg->list.next == &pci_mmcfg_list) 313 + break; 314 + 320 315 cfgx = list_entry(cfg->list.next, typeof(*cfg), list); 321 - if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus) 316 + if (cfg->end_bus >= cfgx->start_bus) 322 317 cfg->end_bus = cfgx->start_bus - 1; 323 318 } 324 319 }
+7 -8
arch/xtensa/kernel/pci.c
··· 69 69 * but we want to try to avoid allocating at 0x2900-0x2bff 70 70 * which might have be mirrored at 0x0100-0x03ff.. 71 71 */ 72 - void 73 - pcibios_align_resource(void *data, struct resource *res, resource_size_t size, 74 - resource_size_t align) 72 + resource_size_t 73 + pcibios_align_resource(void *data, const struct resource *res, 74 + resource_size_t size, resource_size_t align) 75 75 { 76 76 struct pci_dev *dev = data; 77 + resource_size_t start = res->start; 77 78 78 79 if (res->flags & IORESOURCE_IO) { 79 - resource_size_t start = res->start; 80 - 81 80 if (size > 0x100) { 82 81 printk(KERN_ERR "PCI: I/O Region %s/%d too large" 83 82 " (%ld bytes)\n", pci_name(dev), 84 83 dev->resource - res, size); 85 84 } 86 85 87 - if (start & 0x300) { 86 + if (start & 0x300) 88 87 start = (start + 0x3ff) & ~0x3ff; 89 - res->start = start; 90 - } 91 88 } 89 + 90 + return start; 92 91 } 93 92 94 93 int
+2 -8
drivers/acpi/acpica/acevents.h
··· 76 76 * evgpe - GPE handling and dispatch 77 77 */ 78 78 acpi_status 79 - acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, 80 - u8 type); 79 + acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info); 81 80 82 - acpi_status 83 - acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, 84 - u8 write_to_hardware); 81 + acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); 85 82 86 83 acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); 87 84 ··· 117 120 u32 gpe_number); 118 121 119 122 u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); 120 - 121 - acpi_status 122 - acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type); 123 123 124 124 acpi_status 125 125 acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
+2
drivers/acpi/acpica/aclocal.h
··· 426 426 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ 427 427 u8 flags; /* Misc info about this GPE */ 428 428 u8 gpe_number; /* This GPE */ 429 + u8 runtime_count; 430 + u8 wakeup_count; 429 431 }; 430 432 431 433 /* Information about a GPE register pair, one per each status/enable pair in an array */
+2
drivers/acpi/acpica/acobject.h
··· 287 287 288 288 struct acpi_object_notify_handler { 289 289 ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */ 290 + u32 handler_type; 290 291 acpi_notify_handler handler; 291 292 void *context; 293 + struct acpi_object_notify_handler *next; 292 294 }; 293 295 294 296 struct acpi_object_addr_handler {
+18 -143
drivers/acpi/acpica/evgpe.c
··· 54 54 55 55 /******************************************************************************* 56 56 * 57 - * FUNCTION: acpi_ev_set_gpe_type 58 - * 59 - * PARAMETERS: gpe_event_info - GPE to set 60 - * Type - New type 61 - * 62 - * RETURN: Status 63 - * 64 - * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run) 65 - * 66 - ******************************************************************************/ 67 - 68 - acpi_status 69 - acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type) 70 - { 71 - acpi_status status; 72 - 73 - ACPI_FUNCTION_TRACE(ev_set_gpe_type); 74 - 75 - /* Validate type and update register enable masks */ 76 - 77 - switch (type) { 78 - case ACPI_GPE_TYPE_WAKE: 79 - case ACPI_GPE_TYPE_RUNTIME: 80 - case ACPI_GPE_TYPE_WAKE_RUN: 81 - break; 82 - 83 - default: 84 - return_ACPI_STATUS(AE_BAD_PARAMETER); 85 - } 86 - 87 - /* Disable the GPE if currently enabled */ 88 - 89 - status = acpi_ev_disable_gpe(gpe_event_info); 90 - 91 - /* Clear the type bits and insert the new Type */ 92 - 93 - gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; 94 - gpe_event_info->flags |= type; 95 - return_ACPI_STATUS(status); 96 - } 97 - 98 - /******************************************************************************* 99 - * 100 57 * FUNCTION: acpi_ev_update_gpe_enable_masks 101 58 * 102 59 * PARAMETERS: gpe_event_info - GPE to update 103 - * Type - What to do: ACPI_GPE_DISABLE or 104 - * ACPI_GPE_ENABLE 105 60 * 106 61 * RETURN: Status 107 62 * ··· 65 110 ******************************************************************************/ 66 111 67 112 acpi_status 68 - acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info, 69 - u8 type) 113 + acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info) 70 114 { 71 115 struct acpi_gpe_register_info *gpe_register_info; 72 116 u8 register_bit; ··· 81 127 (1 << 82 128 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); 83 129 84 - /* 1) Disable 
case. Simply clear all enable bits */ 130 + ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit); 131 + ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); 85 132 86 - if (type == ACPI_GPE_DISABLE) { 87 - ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, 88 - register_bit); 89 - ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); 90 - return_ACPI_STATUS(AE_OK); 91 - } 92 - 93 - /* 2) Enable case. Set/Clear the appropriate enable bits */ 94 - 95 - switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { 96 - case ACPI_GPE_TYPE_WAKE: 97 - ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); 98 - ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); 99 - break; 100 - 101 - case ACPI_GPE_TYPE_RUNTIME: 102 - ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, 103 - register_bit); 133 + if (gpe_event_info->runtime_count) 104 134 ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); 105 - break; 106 135 107 - case ACPI_GPE_TYPE_WAKE_RUN: 136 + if (gpe_event_info->wakeup_count) 108 137 ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); 109 - ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); 110 - break; 111 - 112 - default: 113 - return_ACPI_STATUS(AE_BAD_PARAMETER); 114 - } 115 138 116 139 return_ACPI_STATUS(AE_OK); 117 140 } ··· 98 167 * FUNCTION: acpi_ev_enable_gpe 99 168 * 100 169 * PARAMETERS: gpe_event_info - GPE to enable 101 - * write_to_hardware - Enable now, or just mark data structs 102 - * (WAKE GPEs should be deferred) 103 170 * 104 171 * RETURN: Status 105 172 * ··· 105 176 * 106 177 ******************************************************************************/ 107 178 108 - acpi_status 109 - acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info, 110 - u8 write_to_hardware) 179 + acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 111 180 { 112 181 acpi_status status; 113 182 ··· 113 186 114 187 /* Make sure HW enable masks are updated 
*/ 115 188 116 - status = 117 - acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE); 118 - if (ACPI_FAILURE(status)) { 189 + status = acpi_ev_update_gpe_enable_masks(gpe_event_info); 190 + if (ACPI_FAILURE(status)) 119 191 return_ACPI_STATUS(status); 120 - } 121 192 122 193 /* Mark wake-enabled or HW enable, or both */ 123 194 124 - switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { 125 - case ACPI_GPE_TYPE_WAKE: 195 + if (gpe_event_info->runtime_count) { 196 + /* Clear the GPE (of stale events), then enable it */ 197 + status = acpi_hw_clear_gpe(gpe_event_info); 198 + if (ACPI_FAILURE(status)) 199 + return_ACPI_STATUS(status); 126 200 127 - ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); 128 - break; 129 - 130 - case ACPI_GPE_TYPE_WAKE_RUN: 131 - 132 - ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); 133 - 134 - /*lint -fallthrough */ 135 - 136 - case ACPI_GPE_TYPE_RUNTIME: 137 - 138 - ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); 139 - 140 - if (write_to_hardware) { 141 - 142 - /* Clear the GPE (of stale events), then enable it */ 143 - 144 - status = acpi_hw_clear_gpe(gpe_event_info); 145 - if (ACPI_FAILURE(status)) { 146 - return_ACPI_STATUS(status); 147 - } 148 - 149 - /* Enable the requested runtime GPE */ 150 - 151 - status = acpi_hw_write_gpe_enable_reg(gpe_event_info); 152 - } 153 - break; 154 - 155 - default: 156 - return_ACPI_STATUS(AE_BAD_PARAMETER); 201 + /* Enable the requested runtime GPE */ 202 + status = acpi_hw_write_gpe_enable_reg(gpe_event_info); 157 203 } 158 204 159 205 return_ACPI_STATUS(AE_OK); ··· 152 252 153 253 /* Make sure HW enable masks are updated */ 154 254 155 - status = 156 - acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE); 157 - if (ACPI_FAILURE(status)) { 255 + status = acpi_ev_update_gpe_enable_masks(gpe_event_info); 256 + if (ACPI_FAILURE(status)) 158 257 return_ACPI_STATUS(status); 159 - } 160 - 161 - /* Clear the appropriate enabled flags for this GPE */ 
162 - 163 - switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) { 164 - case ACPI_GPE_TYPE_WAKE: 165 - ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); 166 - break; 167 - 168 - case ACPI_GPE_TYPE_WAKE_RUN: 169 - ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED); 170 - 171 - /* fallthrough */ 172 - 173 - case ACPI_GPE_TYPE_RUNTIME: 174 - 175 - /* Disable the requested runtime GPE */ 176 - 177 - ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED); 178 - break; 179 - 180 - default: 181 - break; 182 - } 183 258 184 259 /* 185 260 * Even if we don't know the GPE type, make sure that we always ··· 396 521 397 522 /* Set the GPE flags for return to enabled state */ 398 523 399 - (void)acpi_ev_enable_gpe(gpe_event_info, FALSE); 524 + (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); 400 525 401 526 /* 402 527 * Take a snapshot of the GPE info for this level - we copy the info to
+33 -54
drivers/acpi/acpica/evgpeblk.c
··· 258 258 u32 gpe_number; 259 259 char name[ACPI_NAME_SIZE + 1]; 260 260 u8 type; 261 - acpi_status status; 262 261 263 262 ACPI_FUNCTION_TRACE(ev_save_method_info); 264 263 ··· 324 325 325 326 /* 326 327 * Now we can add this information to the gpe_event_info block for use 327 - * during dispatch of this GPE. Default type is RUNTIME, although this may 328 - * change when the _PRW methods are executed later. 328 + * during dispatch of this GPE. 329 329 */ 330 330 gpe_event_info = 331 331 &gpe_block->event_info[gpe_number - gpe_block->block_base_number]; 332 332 333 - gpe_event_info->flags = (u8) 334 - (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME); 333 + gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD); 335 334 336 335 gpe_event_info->dispatch.method_node = 337 336 (struct acpi_namespace_node *)obj_handle; 338 337 339 - /* Update enable mask, but don't enable the HW GPE as of yet */ 340 - 341 - status = acpi_ev_enable_gpe(gpe_event_info, FALSE); 342 - 343 338 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, 344 339 "Registered GPE method %s as GPE number 0x%.2X\n", 345 340 name, gpe_number)); 346 - return_ACPI_STATUS(status); 341 + return_ACPI_STATUS(AE_OK); 347 342 } 348 343 349 344 /******************************************************************************* ··· 447 454 gpe_block-> 448 455 block_base_number]; 449 456 450 - /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */ 451 - 452 - gpe_event_info->flags &= 453 - ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED); 454 - 455 - status = 456 - acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE); 457 - if (ACPI_FAILURE(status)) { 458 - goto cleanup; 459 - } 460 - 461 - status = 462 - acpi_ev_update_gpe_enable_masks(gpe_event_info, 463 - ACPI_GPE_DISABLE); 457 + gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 464 458 } 465 459 466 460 cleanup: ··· 969 989 acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, 970 990 struct acpi_gpe_block_info *gpe_block) 971 991 { 972 - acpi_status status; 973 
992 struct acpi_gpe_event_info *gpe_event_info; 974 993 struct acpi_gpe_walk_info gpe_info; 975 994 u32 wake_gpe_count; ··· 998 1019 gpe_info.gpe_block = gpe_block; 999 1020 gpe_info.gpe_device = gpe_device; 1000 1021 1001 - status = 1002 - acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 1022 + acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 1003 1023 ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, 1004 1024 acpi_ev_match_prw_and_gpe, NULL, 1005 1025 &gpe_info, NULL); 1006 1026 } 1007 1027 1008 1028 /* 1009 - * Enable all GPEs in this block that have these attributes: 1010 - * 1) are "runtime" or "run/wake" GPEs, and 1011 - * 2) have a corresponding _Lxx or _Exx method 1012 - * 1013 - * Any other GPEs within this block must be enabled via the 1014 - * acpi_enable_gpe() external interface. 1029 + * Enable all GPEs that have a corresponding method and aren't 1030 + * capable of generating wakeups. Any other GPEs within this block 1031 + * must be enabled via the acpi_enable_gpe() interface. 
1015 1032 */ 1016 1033 wake_gpe_count = 0; 1017 1034 gpe_enabled_count = 0; 1035 + if (gpe_device == acpi_gbl_fadt_gpe_device) 1036 + gpe_device = NULL; 1018 1037 1019 1038 for (i = 0; i < gpe_block->register_count; i++) { 1020 - for (j = 0; j < 8; j++) { 1039 + for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 1040 + acpi_status status; 1041 + acpi_size gpe_index; 1042 + int gpe_number; 1021 1043 1022 1044 /* Get the info block for this particular GPE */ 1045 + gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j; 1046 + gpe_event_info = &gpe_block->event_info[gpe_index]; 1023 1047 1024 - gpe_event_info = &gpe_block->event_info[((acpi_size) i * 1025 - ACPI_GPE_REGISTER_WIDTH) 1026 - + j]; 1027 - 1028 - if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 1029 - ACPI_GPE_DISPATCH_METHOD) && 1030 - (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) { 1031 - gpe_enabled_count++; 1032 - } 1033 - 1034 - if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) { 1048 + if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) { 1035 1049 wake_gpe_count++; 1050 + if (acpi_gbl_leave_wake_gpes_disabled) 1051 + continue; 1036 1052 } 1053 + 1054 + if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) 1055 + continue; 1056 + 1057 + gpe_number = gpe_index + gpe_block->block_base_number; 1058 + status = acpi_enable_gpe(gpe_device, gpe_number, 1059 + ACPI_GPE_TYPE_RUNTIME); 1060 + if (ACPI_FAILURE(status)) 1061 + ACPI_ERROR((AE_INFO, 1062 + "Failed to enable GPE %02X\n", 1063 + gpe_number)); 1064 + else 1065 + gpe_enabled_count++; 1037 1066 } 1038 1067 } 1039 1068 ··· 1049 1062 "Found %u Wake, Enabled %u Runtime GPEs in this block\n", 1050 1063 wake_gpe_count, gpe_enabled_count)); 1051 1064 1052 - /* Enable all valid runtime GPEs found above */ 1053 - 1054 - status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL); 1055 - if (ACPI_FAILURE(status)) { 1056 - ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p", 1057 - gpe_block)); 1058 - } 1059 - 1060 - return_ACPI_STATUS(status); 
1065 + return_ACPI_STATUS(AE_OK); 1061 1066 } 1062 1067 1063 1068 /*******************************************************************************
+9 -3
drivers/acpi/acpica/evmisc.c
··· 259 259 260 260 handler_obj = notify_info->notify.handler_obj; 261 261 if (handler_obj) { 262 - handler_obj->notify.handler(notify_info->notify.node, 263 - notify_info->notify.value, 264 - handler_obj->notify.context); 262 + struct acpi_object_notify_handler *notifier; 263 + 264 + notifier = &handler_obj->notify; 265 + while (notifier) { 266 + notifier->handler(notify_info->notify.node, 267 + notify_info->notify.value, 268 + notifier->context); 269 + notifier = notifier->next; 270 + } 265 271 } 266 272 267 273 /* All done with the info object */
+139 -50
drivers/acpi/acpica/evxface.c
··· 218 218 219 219 /******************************************************************************* 220 220 * 221 + * FUNCTION: acpi_populate_handler_object 222 + * 223 + * PARAMETERS: handler_obj - Handler object to populate 224 + * handler_type - The type of handler: 225 + * ACPI_SYSTEM_NOTIFY: system_handler (00-7f) 226 + * ACPI_DEVICE_NOTIFY: driver_handler (80-ff) 227 + * ACPI_ALL_NOTIFY: both system and device 228 + * handler - Address of the handler 229 + * context - Value passed to the handler on each GPE 230 + * next - Address of a handler object to link to 231 + * 232 + * RETURN: None 233 + * 234 + * DESCRIPTION: Populate a handler object. 235 + * 236 + ******************************************************************************/ 237 + static void 238 + acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj, 239 + u32 handler_type, 240 + acpi_notify_handler handler, void *context, 241 + struct acpi_object_notify_handler *next) 242 + { 243 + handler_obj->handler_type = handler_type; 244 + handler_obj->handler = handler; 245 + handler_obj->context = context; 246 + handler_obj->next = next; 247 + } 248 + 249 + /******************************************************************************* 250 + * 251 + * FUNCTION: acpi_add_handler_object 252 + * 253 + * PARAMETERS: parent_obj - Parent of the new object 254 + * handler - Address of the handler 255 + * context - Value passed to the handler on each GPE 256 + * 257 + * RETURN: Status 258 + * 259 + * DESCRIPTION: Create a new handler object and populate it. 260 + * 261 + ******************************************************************************/ 262 + static acpi_status 263 + acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj, 264 + acpi_notify_handler handler, void *context) 265 + { 266 + struct acpi_object_notify_handler *handler_obj; 267 + 268 + /* The parent must not be a defice notify handler object. 
*/ 269 + if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY) 270 + return AE_BAD_PARAMETER; 271 + 272 + handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj)); 273 + if (!handler_obj) 274 + return AE_NO_MEMORY; 275 + 276 + acpi_populate_handler_object(handler_obj, 277 + ACPI_SYSTEM_NOTIFY, 278 + handler, context, 279 + parent_obj->next); 280 + parent_obj->next = handler_obj; 281 + 282 + return AE_OK; 283 + } 284 + 285 + /******************************************************************************* 286 + * 221 287 * FUNCTION: acpi_install_notify_handler 222 288 * 223 289 * PARAMETERS: Device - The device for which notifies will be handled ··· 382 316 obj_desc = acpi_ns_get_attached_object(node); 383 317 if (obj_desc) { 384 318 385 - /* Object exists - make sure there's no handler */ 319 + /* Object exists. */ 386 320 387 - if (((handler_type & ACPI_SYSTEM_NOTIFY) && 388 - obj_desc->common_notify.system_notify) || 389 - ((handler_type & ACPI_DEVICE_NOTIFY) && 390 - obj_desc->common_notify.device_notify)) { 321 + /* For a device notify, make sure there's no handler. */ 322 + if ((handler_type & ACPI_DEVICE_NOTIFY) && 323 + obj_desc->common_notify.device_notify) { 391 324 status = AE_ALREADY_EXISTS; 325 + goto unlock_and_exit; 326 + } 327 + 328 + /* System notifies may have more handlers installed. 
*/ 329 + notify_obj = obj_desc->common_notify.system_notify; 330 + 331 + if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) { 332 + struct acpi_object_notify_handler *parent_obj; 333 + 334 + if (handler_type & ACPI_DEVICE_NOTIFY) { 335 + status = AE_ALREADY_EXISTS; 336 + goto unlock_and_exit; 337 + } 338 + 339 + parent_obj = &notify_obj->notify; 340 + status = acpi_add_handler_object(parent_obj, 341 + handler, 342 + context); 392 343 goto unlock_and_exit; 393 344 } 394 345 } else { ··· 439 356 goto unlock_and_exit; 440 357 } 441 358 442 - notify_obj->notify.node = node; 443 - notify_obj->notify.handler = handler; 444 - notify_obj->notify.context = context; 359 + acpi_populate_handler_object(&notify_obj->notify, 360 + handler_type, 361 + handler, context, 362 + NULL); 445 363 446 364 if (handler_type & ACPI_SYSTEM_NOTIFY) { 447 365 obj_desc->common_notify.system_notify = notify_obj; ··· 502 418 goto exit; 503 419 } 504 420 421 + 422 + /* Make sure all deferred tasks are completed */ 423 + acpi_os_wait_events_complete(NULL); 424 + 505 425 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 506 426 if (ACPI_FAILURE(status)) { 507 427 goto exit; ··· 531 443 !acpi_gbl_device_notify.handler)) { 532 444 status = AE_NOT_EXIST; 533 445 goto unlock_and_exit; 534 - } 535 - 536 - /* Make sure all deferred tasks are completed */ 537 - 538 - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 539 - acpi_os_wait_events_complete(NULL); 540 - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 541 - if (ACPI_FAILURE(status)) { 542 - goto exit; 543 446 } 544 447 545 448 if (handler_type & ACPI_SYSTEM_NOTIFY) { ··· 567 488 /* Object exists - make sure there's an existing handler */ 568 489 569 490 if (handler_type & ACPI_SYSTEM_NOTIFY) { 491 + struct acpi_object_notify_handler *handler_obj; 492 + struct acpi_object_notify_handler *parent_obj; 493 + 570 494 notify_obj = obj_desc->common_notify.system_notify; 571 495 if (!notify_obj) { 572 496 status = AE_NOT_EXIST; 573 497 goto 
unlock_and_exit; 574 498 } 575 499 576 - if (notify_obj->notify.handler != handler) { 500 + handler_obj = &notify_obj->notify; 501 + parent_obj = NULL; 502 + while (handler_obj->handler != handler) { 503 + if (handler_obj->next) { 504 + parent_obj = handler_obj; 505 + handler_obj = handler_obj->next; 506 + } else { 507 + break; 508 + } 509 + } 510 + 511 + if (handler_obj->handler != handler) { 577 512 status = AE_BAD_PARAMETER; 578 513 goto unlock_and_exit; 579 514 } 580 - /* Make sure all deferred tasks are completed */ 581 515 582 - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 583 - acpi_os_wait_events_complete(NULL); 584 - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 585 - if (ACPI_FAILURE(status)) { 586 - goto exit; 516 + /* 517 + * Remove the handler. There are three possible cases. 518 + * First, we may need to remove a non-embedded object. 519 + * Second, we may need to remove the embedded object's 520 + * handler data, while non-embedded objects exist. 521 + * Finally, we may need to remove the embedded object 522 + * entirely along with its container. 523 + */ 524 + if (parent_obj) { 525 + /* Non-embedded object is being removed. */ 526 + parent_obj->next = handler_obj->next; 527 + ACPI_FREE(handler_obj); 528 + } else if (notify_obj->notify.next) { 529 + /* 530 + * The handler matches the embedded object, but 531 + * there are more handler objects in the list. 532 + * Replace the embedded object's data with the 533 + * first next object's data and remove that 534 + * object. 535 + */ 536 + parent_obj = &notify_obj->notify; 537 + handler_obj = notify_obj->notify.next; 538 + *parent_obj = *handler_obj; 539 + ACPI_FREE(handler_obj); 540 + } else { 541 + /* No more handler objects in the list. 
*/ 542 + obj_desc->common_notify.system_notify = NULL; 543 + acpi_ut_remove_reference(notify_obj); 587 544 } 588 - 589 - /* Remove the handler */ 590 - obj_desc->common_notify.system_notify = NULL; 591 - acpi_ut_remove_reference(notify_obj); 592 545 } 593 546 594 547 if (handler_type & ACPI_DEVICE_NOTIFY) { ··· 633 522 if (notify_obj->notify.handler != handler) { 634 523 status = AE_BAD_PARAMETER; 635 524 goto unlock_and_exit; 636 - } 637 - /* Make sure all deferred tasks are completed */ 638 - 639 - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 640 - acpi_os_wait_events_complete(NULL); 641 - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 642 - if (ACPI_FAILURE(status)) { 643 - goto exit; 644 525 } 645 526 646 527 /* Remove the handler */ ··· 720 617 handler->context = context; 721 618 handler->method_node = gpe_event_info->dispatch.method_node; 722 619 723 - /* Disable the GPE before installing the handler */ 724 - 725 - status = acpi_ev_disable_gpe(gpe_event_info); 726 - if (ACPI_FAILURE(status)) { 727 - goto unlock_and_exit; 728 - } 729 - 730 620 /* Install the handler */ 731 621 732 622 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); ··· 800 704 801 705 if (gpe_event_info->dispatch.handler->address != address) { 802 706 status = AE_BAD_PARAMETER; 803 - goto unlock_and_exit; 804 - } 805 - 806 - /* Disable the GPE before removing the handler */ 807 - 808 - status = acpi_ev_disable_gpe(gpe_event_info); 809 - if (ACPI_FAILURE(status)) { 810 707 goto unlock_and_exit; 811 708 } 812 709
+101 -55
drivers/acpi/acpica/evxfevnt.c
··· 201 201 202 202 /******************************************************************************* 203 203 * 204 - * FUNCTION: acpi_set_gpe_type 204 + * FUNCTION: acpi_set_gpe 205 205 * 206 206 * PARAMETERS: gpe_device - Parent GPE Device 207 207 * gpe_number - GPE level within the GPE block 208 - * Type - New GPE type 209 - * 210 - * RETURN: Status 211 - * 212 - * DESCRIPTION: Set the type of an individual GPE 213 - * 214 - ******************************************************************************/ 215 - acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type) 216 - { 217 - acpi_status status = AE_OK; 218 - struct acpi_gpe_event_info *gpe_event_info; 219 - 220 - ACPI_FUNCTION_TRACE(acpi_set_gpe_type); 221 - 222 - /* Ensure that we have a valid GPE number */ 223 - 224 - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 225 - if (!gpe_event_info) { 226 - status = AE_BAD_PARAMETER; 227 - goto unlock_and_exit; 228 - } 229 - 230 - if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) { 231 - return_ACPI_STATUS(AE_OK); 232 - } 233 - 234 - /* Set the new type (will disable GPE if currently enabled) */ 235 - 236 - status = acpi_ev_set_gpe_type(gpe_event_info, type); 237 - 238 - unlock_and_exit: 239 - return_ACPI_STATUS(status); 240 - } 241 - 242 - ACPI_EXPORT_SYMBOL(acpi_set_gpe_type) 243 - 244 - /******************************************************************************* 245 - * 246 - * FUNCTION: acpi_enable_gpe 247 - * 248 - * PARAMETERS: gpe_device - Parent GPE Device 249 - * gpe_number - GPE level within the GPE block 250 - * Flags - Just enable, or also wake enable? 
208 + * action - Enable or disable 251 209 * Called from ISR or not 252 210 * 253 211 * RETURN: Status 254 212 * 255 - * DESCRIPTION: Enable an ACPI event (general purpose) 213 + * DESCRIPTION: Enable or disable an ACPI event (general purpose) 256 214 * 257 215 ******************************************************************************/ 258 - acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) 216 + acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) 259 217 { 260 218 acpi_status status = AE_OK; 261 219 acpi_cpu_flags flags; 262 220 struct acpi_gpe_event_info *gpe_event_info; 263 221 264 - ACPI_FUNCTION_TRACE(acpi_enable_gpe); 222 + ACPI_FUNCTION_TRACE(acpi_set_gpe); 265 223 266 224 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 267 225 ··· 231 273 goto unlock_and_exit; 232 274 } 233 275 234 - /* Perform the enable */ 276 + /* Perform the action */ 235 277 236 - status = acpi_ev_enable_gpe(gpe_event_info, TRUE); 278 + switch (action) { 279 + case ACPI_GPE_ENABLE: 280 + status = acpi_ev_enable_gpe(gpe_event_info); 281 + break; 282 + 283 + case ACPI_GPE_DISABLE: 284 + status = acpi_ev_disable_gpe(gpe_event_info); 285 + break; 286 + 287 + default: 288 + ACPI_ERROR((AE_INFO, "Invalid action\n")); 289 + status = AE_BAD_PARAMETER; 290 + break; 291 + } 237 292 238 293 unlock_and_exit: 239 294 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 240 295 return_ACPI_STATUS(status); 241 296 } 242 297 298 + ACPI_EXPORT_SYMBOL(acpi_set_gpe) 299 + 300 + /******************************************************************************* 301 + * 302 + * FUNCTION: acpi_enable_gpe 303 + * 304 + * PARAMETERS: gpe_device - Parent GPE Device 305 + * gpe_number - GPE level within the GPE block 306 + * type - Purpose the GPE will be used for 307 + * 308 + * RETURN: Status 309 + * 310 + * DESCRIPTION: Take a reference to a GPE and enable it if necessary 311 + * 312 + ******************************************************************************/ 313 + 
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) 314 + { 315 + acpi_status status = AE_OK; 316 + acpi_cpu_flags flags; 317 + struct acpi_gpe_event_info *gpe_event_info; 318 + 319 + ACPI_FUNCTION_TRACE(acpi_enable_gpe); 320 + 321 + if (type & ~ACPI_GPE_TYPE_WAKE_RUN) 322 + return_ACPI_STATUS(AE_BAD_PARAMETER); 323 + 324 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 325 + 326 + /* Ensure that we have a valid GPE number */ 327 + 328 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 329 + if (!gpe_event_info) { 330 + status = AE_BAD_PARAMETER; 331 + goto unlock_and_exit; 332 + } 333 + 334 + if (type & ACPI_GPE_TYPE_RUNTIME) { 335 + if (++gpe_event_info->runtime_count == 1) { 336 + status = acpi_ev_enable_gpe(gpe_event_info); 337 + if (ACPI_FAILURE(status)) 338 + gpe_event_info->runtime_count--; 339 + } 340 + } 341 + 342 + if (type & ACPI_GPE_TYPE_WAKE) { 343 + if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 344 + status = AE_BAD_PARAMETER; 345 + goto unlock_and_exit; 346 + } 347 + 348 + /* 349 + * Wake-up GPEs are only enabled right prior to putting the 350 + * system into a sleep state. 351 + */ 352 + if (++gpe_event_info->wakeup_count == 1) 353 + acpi_ev_update_gpe_enable_masks(gpe_event_info); 354 + } 355 + 356 + unlock_and_exit: 357 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 358 + return_ACPI_STATUS(status); 359 + } 243 360 ACPI_EXPORT_SYMBOL(acpi_enable_gpe) 244 361 245 362 /******************************************************************************* ··· 323 290 * 324 291 * PARAMETERS: gpe_device - Parent GPE Device 325 292 * gpe_number - GPE level within the GPE block 326 - * Flags - Just disable, or also wake disable? 
327 - * Called from ISR or not 293 + * type - Purpose the GPE won't be used for any more 328 294 * 329 295 * RETURN: Status 330 296 * 331 - * DESCRIPTION: Disable an ACPI event (general purpose) 297 + * DESCRIPTION: Release a reference to a GPE and disable it if necessary 332 298 * 333 299 ******************************************************************************/ 334 - acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) 300 + acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) 335 301 { 336 302 acpi_status status = AE_OK; 337 303 acpi_cpu_flags flags; 338 304 struct acpi_gpe_event_info *gpe_event_info; 339 305 340 306 ACPI_FUNCTION_TRACE(acpi_disable_gpe); 307 + 308 + if (type & ~ACPI_GPE_TYPE_WAKE_RUN) 309 + return_ACPI_STATUS(AE_BAD_PARAMETER); 341 310 342 311 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 343 312 /* Ensure that we have a valid GPE number */ ··· 350 315 goto unlock_and_exit; 351 316 } 352 317 353 - status = acpi_ev_disable_gpe(gpe_event_info); 318 + if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) { 319 + if (--gpe_event_info->runtime_count == 0) 320 + status = acpi_ev_disable_gpe(gpe_event_info); 321 + } 322 + 323 + if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) { 324 + /* 325 + * Wake-up GPEs are not enabled after leaving system sleep 326 + * states, so we don't need to disable them here. 327 + */ 328 + if (--gpe_event_info->wakeup_count == 0) 329 + acpi_ev_update_gpe_enable_masks(gpe_event_info); 330 + } 354 331 355 332 unlock_and_exit: 356 333 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 357 334 return_ACPI_STATUS(status); 358 335 } 359 - 360 336 ACPI_EXPORT_SYMBOL(acpi_disable_gpe) 361 337 362 338 /*******************************************************************************
+11 -4
drivers/acpi/button.c
··· 422 422 423 423 if (device->wakeup.flags.valid) { 424 424 /* Button's GPE is run-wake GPE */ 425 - acpi_set_gpe_type(device->wakeup.gpe_device, 426 - device->wakeup.gpe_number, 427 - ACPI_GPE_TYPE_WAKE_RUN); 428 425 acpi_enable_gpe(device->wakeup.gpe_device, 429 - device->wakeup.gpe_number); 426 + device->wakeup.gpe_number, 427 + ACPI_GPE_TYPE_WAKE_RUN); 428 + device->wakeup.run_wake_count++; 430 429 device->wakeup.state.enabled = 1; 431 430 } 432 431 ··· 444 445 static int acpi_button_remove(struct acpi_device *device, int type) 445 446 { 446 447 struct acpi_button *button = acpi_driver_data(device); 448 + 449 + if (device->wakeup.flags.valid) { 450 + acpi_disable_gpe(device->wakeup.gpe_device, 451 + device->wakeup.gpe_number, 452 + ACPI_GPE_TYPE_WAKE_RUN); 453 + device->wakeup.run_wake_count--; 454 + device->wakeup.state.enabled = 0; 455 + } 447 456 448 457 acpi_button_remove_fs(device); 449 458 input_unregister_device(button->input);
+19 -9
drivers/acpi/ec.c
··· 307 307 pr_debug(PREFIX "transaction start\n"); 308 308 /* disable GPE during transaction if storm is detected */ 309 309 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { 310 - acpi_disable_gpe(NULL, ec->gpe); 310 + /* 311 + * It has to be disabled at the hardware level regardless of the 312 + * GPE reference counting, so that it doesn't trigger. 313 + */ 314 + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); 311 315 } 312 316 313 317 status = acpi_ec_transaction_unlocked(ec, t); ··· 320 316 ec_check_sci_sync(ec, acpi_ec_read_status(ec)); 321 317 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { 322 318 msleep(1); 323 - /* it is safe to enable GPE outside of transaction */ 324 - acpi_enable_gpe(NULL, ec->gpe); 319 + /* 320 + * It is safe to enable the GPE outside of the transaction. Use 321 + * acpi_set_gpe() for that, since we used it to disable the GPE 322 + * above. 323 + */ 324 + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); 325 325 } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { 326 326 pr_info(PREFIX "GPE storm detected, " 327 327 "transactions will use polling mode\n"); ··· 796 788 &acpi_ec_gpe_handler, ec); 797 789 if (ACPI_FAILURE(status)) 798 790 return -ENODEV; 799 - acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); 800 - acpi_enable_gpe(NULL, ec->gpe); 791 + 792 + acpi_enable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); 801 793 status = acpi_install_address_space_handler(ec->handle, 802 794 ACPI_ADR_SPACE_EC, 803 795 &acpi_ec_space_handler, ··· 814 806 } else { 815 807 acpi_remove_gpe_handler(NULL, ec->gpe, 816 808 &acpi_ec_gpe_handler); 809 + acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); 817 810 return -ENODEV; 818 811 } 819 812 } ··· 825 816 826 817 static void ec_remove_handlers(struct acpi_ec *ec) 827 818 { 819 + acpi_disable_gpe(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); 828 820 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, 829 821 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) 830 822 pr_err(PREFIX "failed to remove 
space handler\n"); ··· 1067 1057 static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state) 1068 1058 { 1069 1059 struct acpi_ec *ec = acpi_driver_data(device); 1070 - /* Stop using GPE */ 1071 - acpi_disable_gpe(NULL, ec->gpe); 1060 + /* Stop using the GPE, but keep it reference counted. */ 1061 + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); 1072 1062 return 0; 1073 1063 } 1074 1064 1075 1065 static int acpi_ec_resume(struct acpi_device *device) 1076 1066 { 1077 1067 struct acpi_ec *ec = acpi_driver_data(device); 1078 - /* Enable use of GPE back */ 1079 - acpi_enable_gpe(NULL, ec->gpe); 1068 + /* Enable the GPE again, but don't reference count it once more. */ 1069 + acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); 1080 1070 return 0; 1081 1071 } 1082 1072
-2
drivers/acpi/internal.h
··· 36 36 int acpi_power_init(void); 37 37 int acpi_device_sleep_wake(struct acpi_device *dev, 38 38 int enable, int sleep_state, int dev_state); 39 - int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state); 40 - int acpi_disable_wakeup_device_power(struct acpi_device *dev); 41 39 int acpi_power_get_inferred_state(struct acpi_device *device); 42 40 int acpi_power_transition(struct acpi_device *device, int state); 43 41 extern int acpi_power_nocheck;
+13 -1
drivers/acpi/pci_bind.c
··· 26 26 #include <linux/kernel.h> 27 27 #include <linux/types.h> 28 28 #include <linux/pci.h> 29 + #include <linux/pci-acpi.h> 29 30 #include <linux/acpi.h> 31 + #include <linux/pm_runtime.h> 30 32 #include <acpi/acpi_bus.h> 31 33 #include <acpi/acpi_drivers.h> 32 34 ··· 40 38 struct pci_dev *dev; 41 39 42 40 dev = acpi_get_pci_dev(device->handle); 43 - if (!dev || !dev->subordinate) 41 + if (!dev) 42 + goto out; 43 + 44 + device_set_run_wake(&dev->dev, false); 45 + pci_acpi_remove_pm_notifier(device); 46 + 47 + if (!dev->subordinate) 44 48 goto out; 45 49 46 50 acpi_pci_irq_del_prt(dev->subordinate); ··· 69 61 dev = acpi_get_pci_dev(device->handle); 70 62 if (!dev) 71 63 return 0; 64 + 65 + pci_acpi_add_pm_notifier(device, dev); 66 + if (device->wakeup.flags.run_wake) 67 + device_set_run_wake(&dev->dev, true); 72 68 73 69 /* 74 70 * Install the 'bind' function to facilitate callbacks for
+9
drivers/acpi/pci_root.c
··· 30 30 #include <linux/proc_fs.h> 31 31 #include <linux/spinlock.h> 32 32 #include <linux/pm.h> 33 + #include <linux/pm_runtime.h> 33 34 #include <linux/pci.h> 34 35 #include <linux/pci-acpi.h> 35 36 #include <linux/acpi.h> ··· 529 528 if (flags != base_flags) 530 529 acpi_pci_osc_support(root, flags); 531 530 531 + pci_acpi_add_bus_pm_notifier(device, root->bus); 532 + if (device->wakeup.flags.run_wake) 533 + device_set_run_wake(root->bus->bridge, true); 534 + 532 535 return 0; 533 536 534 537 end: ··· 554 549 { 555 550 struct acpi_pci_root *root = acpi_driver_data(device); 556 551 552 + device_set_run_wake(root->bus->bridge, false); 553 + pci_acpi_remove_bus_pm_notifier(device); 554 + 557 555 kfree(root); 558 556 return 0; 559 557 } ··· 566 558 if (acpi_pci_disabled) 567 559 return 0; 568 560 561 + pci_acpi_crs_quirks(); 569 562 if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0) 570 563 return -ENODEV; 571 564
+28 -10
drivers/acpi/scan.c
··· 741 741 return AE_OK; 742 742 } 743 743 744 - static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 744 + static void acpi_bus_set_run_wake_flags(struct acpi_device *device) 745 745 { 746 - acpi_status status = 0; 747 - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 748 - union acpi_object *package = NULL; 749 - int psw_error; 750 - 751 746 struct acpi_device_id button_device_ids[] = { 752 747 {"PNP0C0D", 0}, 753 748 {"PNP0C0C", 0}, 754 749 {"PNP0C0E", 0}, 755 750 {"", 0}, 756 751 }; 752 + acpi_status status; 753 + acpi_event_status event_status; 754 + 755 + device->wakeup.run_wake_count = 0; 756 + device->wakeup.flags.notifier_present = 0; 757 + 758 + /* Power button, Lid switch always enable wakeup */ 759 + if (!acpi_match_device_ids(device, button_device_ids)) { 760 + device->wakeup.flags.run_wake = 1; 761 + device->wakeup.flags.always_enabled = 1; 762 + return; 763 + } 764 + 765 + status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number, 766 + ACPI_NOT_ISR, &event_status); 767 + if (status == AE_OK) 768 + device->wakeup.flags.run_wake = 769 + !!(event_status & ACPI_EVENT_FLAG_HANDLE); 770 + } 771 + 772 + static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 773 + { 774 + acpi_status status = 0; 775 + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 776 + union acpi_object *package = NULL; 777 + int psw_error; 757 778 758 779 /* _PRW */ 759 780 status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer); ··· 794 773 795 774 device->wakeup.flags.valid = 1; 796 775 device->wakeup.prepare_count = 0; 776 + acpi_bus_set_run_wake_flags(device); 797 777 /* Call _PSW/_DSW object to disable its ability to wake the sleeping 798 778 * system for the ACPI device with the _PRW object. 799 779 * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW. 
··· 805 783 if (psw_error) 806 784 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 807 785 "error in _DSW or _PSW evaluation\n")); 808 - 809 - /* Power button, Lid switch always enable wakeup */ 810 - if (!acpi_match_device_ids(device, button_device_ids)) 811 - device->wakeup.flags.run_wake = 1; 812 786 813 787 end: 814 788 if (ACPI_FAILURE(status))
+12 -3
drivers/acpi/sleep.c
··· 745 745 return -ENODEV; 746 746 } 747 747 748 - error = enable ? 749 - acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : 750 - acpi_disable_wakeup_device_power(adev); 748 + if (enable) { 749 + error = acpi_enable_wakeup_device_power(adev, 750 + acpi_target_sleep_state); 751 + if (!error) 752 + acpi_enable_gpe(adev->wakeup.gpe_device, 753 + adev->wakeup.gpe_number, 754 + ACPI_GPE_TYPE_WAKE); 755 + } else { 756 + acpi_disable_gpe(adev->wakeup.gpe_device, adev->wakeup.gpe_number, 757 + ACPI_GPE_TYPE_WAKE); 758 + error = acpi_disable_wakeup_device_power(adev); 759 + } 751 760 if (!error) 752 761 dev_info(dev, "wake-up capability %s by ACPI\n", 753 762 enable ? "enabled" : "disabled");
+2 -2
drivers/acpi/system.c
··· 387 387 if (index < num_gpes) { 388 388 if (!strcmp(buf, "disable\n") && 389 389 (status & ACPI_EVENT_FLAG_ENABLED)) 390 - result = acpi_disable_gpe(handle, index); 390 + result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE); 391 391 else if (!strcmp(buf, "enable\n") && 392 392 !(status & ACPI_EVENT_FLAG_ENABLED)) 393 - result = acpi_enable_gpe(handle, index); 393 + result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE); 394 394 else if (!strcmp(buf, "clear\n") && 395 395 (status & ACPI_EVENT_FLAG_SET)) 396 396 result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
+29 -55
drivers/acpi/wakeup.c
··· 21 21 ACPI_MODULE_NAME("wakeup_devices") 22 22 23 23 /** 24 - * acpi_enable_wakeup_device_prep - prepare wakeup devices 25 - * @sleep_state: ACPI state 26 - * Enable all wakup devices power if the devices' wakeup level 27 - * is higher than requested sleep level 24 + * acpi_enable_wakeup_device_prep - Prepare wake-up devices. 25 + * @sleep_state: ACPI system sleep state. 26 + * 27 + * Enable all wake-up devices' power, unless the requested system sleep state is 28 + * too deep. 28 29 */ 29 - 30 30 void acpi_enable_wakeup_device_prep(u8 sleep_state) 31 31 { 32 32 struct list_head *node, *next; ··· 36 36 struct acpi_device, 37 37 wakeup_list); 38 38 39 - if (!dev->wakeup.flags.valid || 40 - !dev->wakeup.state.enabled || 41 - (sleep_state > (u32) dev->wakeup.sleep_state)) 39 + if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled 40 + || (sleep_state > (u32) dev->wakeup.sleep_state)) 42 41 continue; 43 42 44 43 acpi_enable_wakeup_device_power(dev, sleep_state); ··· 45 46 } 46 47 47 48 /** 48 - * acpi_enable_wakeup_device - enable wakeup devices 49 - * @sleep_state: ACPI state 50 - * Enable all wakup devices's GPE 49 + * acpi_enable_wakeup_device - Enable wake-up device GPEs. 50 + * @sleep_state: ACPI system sleep state. 51 + * 52 + * Enable all wake-up devices' GPEs, with the assumption that 53 + * acpi_disable_all_gpes() was executed before, so we don't need to disable any 54 + * GPEs here. 
51 55 */ 52 56 void acpi_enable_wakeup_device(u8 sleep_state) 53 57 { ··· 67 65 if (!dev->wakeup.flags.valid) 68 66 continue; 69 67 70 - /* If users want to disable run-wake GPE, 71 - * we only disable it for wake and leave it for runtime 72 - */ 73 68 if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) 74 - || sleep_state > (u32) dev->wakeup.sleep_state) { 75 - if (dev->wakeup.flags.run_wake) { 76 - /* set_gpe_type will disable GPE, leave it like that */ 77 - acpi_set_gpe_type(dev->wakeup.gpe_device, 78 - dev->wakeup.gpe_number, 79 - ACPI_GPE_TYPE_RUNTIME); 80 - } 69 + || sleep_state > (u32) dev->wakeup.sleep_state) 81 70 continue; 82 - } 83 - if (!dev->wakeup.flags.run_wake) 84 - acpi_enable_gpe(dev->wakeup.gpe_device, 85 - dev->wakeup.gpe_number); 71 + 72 + /* The wake-up power should have been enabled already. */ 73 + acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 74 + ACPI_GPE_ENABLE); 86 75 } 87 76 } 88 77 89 78 /** 90 - * acpi_disable_wakeup_device - disable devices' wakeup capability 91 - * @sleep_state: ACPI state 92 - * Disable all wakup devices's GPE and wakeup capability 79 + * acpi_disable_wakeup_device - Disable devices' wakeup capability. 80 + * @sleep_state: ACPI system sleep state. 81 + * 82 + * This function only affects devices with wakeup.state.enabled set, which means 83 + * that it reverses the changes made by acpi_enable_wakeup_device_prep(). 
93 84 */ 94 85 void acpi_disable_wakeup_device(u8 sleep_state) 95 86 { ··· 92 97 struct acpi_device *dev = 93 98 container_of(node, struct acpi_device, wakeup_list); 94 99 95 - if (!dev->wakeup.flags.valid) 100 + if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled 101 + || (sleep_state > (u32) dev->wakeup.sleep_state)) 96 102 continue; 97 - 98 - if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count) 99 - || sleep_state > (u32) dev->wakeup.sleep_state) { 100 - if (dev->wakeup.flags.run_wake) { 101 - acpi_set_gpe_type(dev->wakeup.gpe_device, 102 - dev->wakeup.gpe_number, 103 - ACPI_GPE_TYPE_WAKE_RUN); 104 - /* Re-enable it, since set_gpe_type will disable it */ 105 - acpi_enable_gpe(dev->wakeup.gpe_device, 106 - dev->wakeup.gpe_number); 107 - } 108 - continue; 109 - } 110 103 111 104 acpi_disable_wakeup_device_power(dev); 112 - /* Never disable run-wake GPE */ 113 - if (!dev->wakeup.flags.run_wake) { 114 - acpi_disable_gpe(dev->wakeup.gpe_device, 115 - dev->wakeup.gpe_number); 116 - acpi_clear_gpe(dev->wakeup.gpe_device, 117 - dev->wakeup.gpe_number, ACPI_NOT_ISR); 118 - } 119 105 } 120 106 } 121 107 ··· 110 134 struct acpi_device, 111 135 wakeup_list); 112 136 /* In case user doesn't load button driver */ 113 - if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled) 137 + if (!dev->wakeup.flags.always_enabled || 138 + dev->wakeup.state.enabled) 114 139 continue; 115 - acpi_set_gpe_type(dev->wakeup.gpe_device, 116 - dev->wakeup.gpe_number, 117 - ACPI_GPE_TYPE_WAKE_RUN); 118 - acpi_enable_gpe(dev->wakeup.gpe_device, 119 - dev->wakeup.gpe_number); 140 + acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 141 + ACPI_GPE_TYPE_WAKE); 120 142 dev->wakeup.state.enabled = 1; 121 143 } 122 144 mutex_unlock(&acpi_device_lock);
+9 -9
drivers/isdn/hisax/Kconfig
··· 109 109 110 110 config HISAX_TELESPCI 111 111 bool "Teles PCI" 112 - depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 112 + depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 113 113 help 114 114 This enables HiSax support for the Teles PCI. 115 115 See <file:Documentation/isdn/README.HiSax> on how to configure it. ··· 237 237 238 238 config HISAX_NETJET 239 239 bool "NETjet card" 240 - depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 240 + depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 241 241 help 242 242 This enables HiSax support for the NetJet from Traverse 243 243 Technologies. ··· 248 248 249 249 config HISAX_NETJET_U 250 250 bool "NETspider U card" 251 - depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 251 + depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 252 252 help 253 253 This enables HiSax support for the Netspider U interface ISDN card 254 254 from Traverse Technologies. ··· 287 287 288 288 config HISAX_BKM_A4T 289 289 bool "Telekom A4T card" 290 - depends on PCI && PCI_LEGACY 290 + depends on PCI 291 291 help 292 292 This enables HiSax support for the Telekom A4T card. 293 293 ··· 297 297 298 298 config HISAX_SCT_QUADRO 299 299 bool "Scitel Quadro card" 300 - depends on PCI && PCI_LEGACY 300 + depends on PCI 301 301 help 302 302 This enables HiSax support for the Scitel Quadro card. 
303 303 ··· 316 316 317 317 config HISAX_HFC_PCI 318 318 bool "HFC PCI-Bus cards" 319 - depends on PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 319 + depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 320 320 help 321 321 This enables HiSax support for the HFC-S PCI 2BDS0 based cards. 322 322 ··· 325 325 326 326 config HISAX_W6692 327 327 bool "Winbond W6692 based cards" 328 - depends on PCI && PCI_LEGACY 328 + depends on PCI 329 329 help 330 330 This enables HiSax support for Winbond W6692 based PCI ISDN cards. 331 331 ··· 341 341 342 342 config HISAX_ENTERNOW_PCI 343 343 bool "Formula-n enter:now PCI card" 344 - depends on HISAX_NETJET && PCI && PCI_LEGACY && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 344 + depends on HISAX_NETJET && PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV)) 345 345 help 346 346 This enables HiSax support for the Formula-n enter:now PCI 347 347 ISDN card. ··· 412 412 413 413 config HISAX_FRITZ_PCIPNP 414 414 tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)" 415 - depends on PCI && PCI_LEGACY && EXPERIMENTAL 415 + depends on PCI && EXPERIMENTAL 416 416 help 417 417 This enables the driver for the AVM Fritz!Card PCI, 418 418 Fritz!Card PCI v2 and Fritz!Card PnP.
+3 -3
drivers/isdn/hisax/avm_pci.c
··· 822 822 823 823 #endif /* __ISAPNP__ */ 824 824 825 - #ifndef CONFIG_PCI_LEGACY 825 + #ifndef CONFIG_PCI 826 826 827 827 static int __devinit avm_pci_setup(struct IsdnCardState *cs) 828 828 { ··· 835 835 836 836 static int __devinit avm_pci_setup(struct IsdnCardState *cs) 837 837 { 838 - if ((dev_avm = pci_find_device(PCI_VENDOR_ID_AVM, 838 + if ((dev_avm = hisax_find_pci_device(PCI_VENDOR_ID_AVM, 839 839 PCI_DEVICE_ID_AVM_A1, dev_avm))) { 840 840 841 841 if (pci_enable_device(dev_avm)) ··· 864 864 return (1); 865 865 } 866 866 867 - #endif /* CONFIG_PCI_LEGACY */ 867 + #endif /* CONFIG_PCI */ 868 868 869 869 int __devinit 870 870 setup_avm_pcipnp(struct IsdnCard *card)
+1 -1
drivers/isdn/hisax/bkm_a4t.c
··· 340 340 } else 341 341 return (0); 342 342 343 - while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN, 343 + while ((dev_a4t = hisax_find_pci_device(PCI_VENDOR_ID_ZORAN, 344 344 PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) { 345 345 ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr); 346 346 if (!ret)
+1 -1
drivers/isdn/hisax/bkm_a8.c
··· 301 301 (sub_vendor_id != PCI_VENDOR_ID_BERKOM))) 302 302 return (0); 303 303 if (cs->subtyp == SCT_1) { 304 - while ((dev_a8 = pci_find_device(PCI_VENDOR_ID_PLX, 304 + while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX, 305 305 PCI_DEVICE_ID_PLX_9050, dev_a8))) { 306 306 307 307 sub_vendor_id = dev_a8->subsystem_vendor;
+7 -7
drivers/isdn/hisax/diva.c
··· 1148 1148 1149 1149 #endif /* ISAPNP */ 1150 1150 1151 - #ifdef CONFIG_PCI_LEGACY 1151 + #ifdef CONFIG_PCI 1152 1152 static struct pci_dev *dev_diva __devinitdata = NULL; 1153 1153 static struct pci_dev *dev_diva_u __devinitdata = NULL; 1154 1154 static struct pci_dev *dev_diva201 __devinitdata = NULL; ··· 1159 1159 struct IsdnCardState *cs = card->cs; 1160 1160 1161 1161 cs->subtyp = 0; 1162 - if ((dev_diva = pci_find_device(PCI_VENDOR_ID_EICON, 1162 + if ((dev_diva = hisax_find_pci_device(PCI_VENDOR_ID_EICON, 1163 1163 PCI_DEVICE_ID_EICON_DIVA20, dev_diva))) { 1164 1164 if (pci_enable_device(dev_diva)) 1165 1165 return(0); 1166 1166 cs->subtyp = DIVA_PCI; 1167 1167 cs->irq = dev_diva->irq; 1168 1168 cs->hw.diva.cfg_reg = pci_resource_start(dev_diva, 2); 1169 - } else if ((dev_diva_u = pci_find_device(PCI_VENDOR_ID_EICON, 1169 + } else if ((dev_diva_u = hisax_find_pci_device(PCI_VENDOR_ID_EICON, 1170 1170 PCI_DEVICE_ID_EICON_DIVA20_U, dev_diva_u))) { 1171 1171 if (pci_enable_device(dev_diva_u)) 1172 1172 return(0); 1173 1173 cs->subtyp = DIVA_PCI; 1174 1174 cs->irq = dev_diva_u->irq; 1175 1175 cs->hw.diva.cfg_reg = pci_resource_start(dev_diva_u, 2); 1176 - } else if ((dev_diva201 = pci_find_device(PCI_VENDOR_ID_EICON, 1176 + } else if ((dev_diva201 = hisax_find_pci_device(PCI_VENDOR_ID_EICON, 1177 1177 PCI_DEVICE_ID_EICON_DIVA201, dev_diva201))) { 1178 1178 if (pci_enable_device(dev_diva201)) 1179 1179 return(0); ··· 1183 1183 (ulong) ioremap(pci_resource_start(dev_diva201, 0), 4096); 1184 1184 cs->hw.diva.cfg_reg = 1185 1185 (ulong) ioremap(pci_resource_start(dev_diva201, 1), 4096); 1186 - } else if ((dev_diva202 = pci_find_device(PCI_VENDOR_ID_EICON, 1186 + } else if ((dev_diva202 = hisax_find_pci_device(PCI_VENDOR_ID_EICON, 1187 1187 PCI_DEVICE_ID_EICON_DIVA202, dev_diva202))) { 1188 1188 if (pci_enable_device(dev_diva202)) 1189 1189 return(0); ··· 1229 1229 return (1); /* card found */ 1230 1230 } 1231 1231 1232 - #else /* if !CONFIG_PCI_LEGACY */ 1232 + 
#else /* if !CONFIG_PCI */ 1233 1233 1234 1234 static int __devinit setup_diva_pci(struct IsdnCard *card) 1235 1235 { 1236 1236 return (-1); /* card not found; continue search */ 1237 1237 } 1238 1238 1239 - #endif /* CONFIG_PCI_LEGACY */ 1239 + #endif /* CONFIG_PCI */ 1240 1240 1241 1241 int __devinit 1242 1242 setup_diva(struct IsdnCard *card)
+4 -4
drivers/isdn/hisax/elsa.c
··· 1025 1025 cs->irq); 1026 1026 } 1027 1027 1028 - #ifdef CONFIG_PCI_LEGACY 1028 + #ifdef CONFIG_PCI 1029 1029 static struct pci_dev *dev_qs1000 __devinitdata = NULL; 1030 1030 static struct pci_dev *dev_qs3000 __devinitdata = NULL; 1031 1031 ··· 1035 1035 struct IsdnCardState *cs = card->cs; 1036 1036 1037 1037 cs->subtyp = 0; 1038 - if ((dev_qs1000 = pci_find_device(PCI_VENDOR_ID_ELSA, 1038 + if ((dev_qs1000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, 1039 1039 PCI_DEVICE_ID_ELSA_MICROLINK, dev_qs1000))) { 1040 1040 if (pci_enable_device(dev_qs1000)) 1041 1041 return(0); ··· 1043 1043 cs->irq = dev_qs1000->irq; 1044 1044 cs->hw.elsa.cfg = pci_resource_start(dev_qs1000, 1); 1045 1045 cs->hw.elsa.base = pci_resource_start(dev_qs1000, 3); 1046 - } else if ((dev_qs3000 = pci_find_device(PCI_VENDOR_ID_ELSA, 1046 + } else if ((dev_qs3000 = hisax_find_pci_device(PCI_VENDOR_ID_ELSA, 1047 1047 PCI_DEVICE_ID_ELSA_QS3000, dev_qs3000))) { 1048 1048 if (pci_enable_device(dev_qs3000)) 1049 1049 return(0); ··· 1093 1093 { 1094 1094 return (1); 1095 1095 } 1096 - #endif /* CONFIG_PCI_LEGACY */ 1096 + #endif /* CONFIG_PCI */ 1097 1097 1098 1098 static int __devinit 1099 1099 setup_elsa_common(struct IsdnCard *card)
+1 -1
drivers/isdn/hisax/enternow_pci.c
··· 406 406 407 407 for ( ;; ) 408 408 { 409 - if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, 409 + if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, 410 410 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { 411 411 ret = en_pci_probe(dev_netjet, cs); 412 412 if (!ret)
+4 -4
drivers/isdn/hisax/gazel.c
··· 531 531 return (0); 532 532 } 533 533 534 - #ifdef CONFIG_PCI_LEGACY 534 + #ifdef CONFIG_PCI 535 535 static struct pci_dev *dev_tel __devinitdata = NULL; 536 536 537 537 static int __devinit ··· 546 546 found = 0; 547 547 seekcard = PCI_DEVICE_ID_PLX_R685; 548 548 for (nbseek = 0; nbseek < 4; nbseek++) { 549 - if ((dev_tel = pci_find_device(PCI_VENDOR_ID_PLX, 549 + if ((dev_tel = hisax_find_pci_device(PCI_VENDOR_ID_PLX, 550 550 seekcard, dev_tel))) { 551 551 if (pci_enable_device(dev_tel)) 552 552 return 1; ··· 620 620 621 621 return (0); 622 622 } 623 - #endif /* CONFIG_PCI_LEGACY */ 623 + #endif /* CONFIG_PCI */ 624 624 625 625 int __devinit 626 626 setup_gazel(struct IsdnCard *card) ··· 640 640 return (0); 641 641 } else { 642 642 643 - #ifdef CONFIG_PCI_LEGACY 643 + #ifdef CONFIG_PCI 644 644 if (setup_gazelpci(cs)) 645 645 return (0); 646 646 #else
+1 -1
drivers/isdn/hisax/hfc_pci.c
··· 1658 1658 1659 1659 i = 0; 1660 1660 while (id_list[i].vendor_id) { 1661 - tmp_hfcpci = pci_find_device(id_list[i].vendor_id, 1661 + tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id, 1662 1662 id_list[i].device_id, 1663 1663 dev_hfcpci); 1664 1664 i++;
+23
drivers/isdn/hisax/hisax.h
··· 1323 1323 char *HiSax_getrev(const char *revision); 1324 1324 int TeiNew(void); 1325 1325 void TeiFree(void); 1326 + 1327 + #ifdef CONFIG_PCI 1328 + 1329 + #include <linux/pci.h> 1330 + 1331 + /* adaptation wrapper for old usage 1332 + * WARNING! This is unfit for use in a PCI hotplug environment, 1333 + * as the returned PCI device can disappear at any moment in time. 1334 + * Callers should be converted to use pci_get_device() instead. 1335 + */ 1336 + static inline struct pci_dev *hisax_find_pci_device(unsigned int vendor, 1337 + unsigned int device, 1338 + struct pci_dev *from) 1339 + { 1340 + struct pci_dev *pdev; 1341 + 1342 + pci_dev_get(from); 1343 + pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); 1344 + pci_dev_put(pdev); 1345 + return pdev; 1346 + } 1347 + 1348 + #endif
+3 -3
drivers/isdn/hisax/niccy.c
··· 297 297 return 0; 298 298 } 299 299 } else { 300 - #ifdef CONFIG_PCI_LEGACY 300 + #ifdef CONFIG_PCI 301 301 static struct pci_dev *niccy_dev __devinitdata; 302 302 303 303 u_int pci_ioaddr; 304 304 cs->subtyp = 0; 305 - if ((niccy_dev = pci_find_device(PCI_VENDOR_ID_SATSAGEM, 305 + if ((niccy_dev = hisax_find_pci_device(PCI_VENDOR_ID_SATSAGEM, 306 306 PCI_DEVICE_ID_SATSAGEM_NICCY, 307 307 niccy_dev))) { 308 308 if (pci_enable_device(niccy_dev)) ··· 354 354 printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n"); 355 355 printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n"); 356 356 return 0; 357 - #endif /* CONFIG_PCI_LEGACY */ 357 + #endif /* CONFIG_PCI */ 358 358 } 359 359 printk(KERN_INFO "HiSax: NICCY %s config irq:%d data:0x%X ale:0x%X\n", 360 360 (cs->subtyp == 1) ? "PnP" : "PCI",
+1 -1
drivers/isdn/hisax/nj_s.c
··· 276 276 277 277 for ( ;; ) 278 278 { 279 - if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, 279 + if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, 280 280 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { 281 281 ret = njs_pci_probe(dev_netjet, cs); 282 282 if (!ret)
+1 -1
drivers/isdn/hisax/nj_u.c
··· 240 240 241 241 for ( ;; ) 242 242 { 243 - if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, 243 + if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, 244 244 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { 245 245 ret = nju_pci_probe(dev_netjet, cs); 246 246 if (!ret)
+3 -3
drivers/isdn/hisax/sedlbauer.c
··· 598 598 } 599 599 #endif /* __ISAPNP__ */ 600 600 601 - #ifdef CONFIG_PCI_LEGACY 601 + #ifdef CONFIG_PCI 602 602 static struct pci_dev *dev_sedl __devinitdata = NULL; 603 603 604 604 static int __devinit ··· 607 607 struct IsdnCardState *cs = card->cs; 608 608 u16 sub_vendor_id, sub_id; 609 609 610 - if ((dev_sedl = pci_find_device(PCI_VENDOR_ID_TIGERJET, 610 + if ((dev_sedl = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET, 611 611 PCI_DEVICE_ID_TIGERJET_100, dev_sedl))) { 612 612 if (pci_enable_device(dev_sedl)) 613 613 return(0); ··· 673 673 return (1); 674 674 } 675 675 676 - #endif /* CONFIG_PCI_LEGACY */ 676 + #endif /* CONFIG_PCI */ 677 677 678 678 int __devinit 679 679 setup_sedlbauer(struct IsdnCard *card)
+1 -1
drivers/isdn/hisax/telespci.c
··· 300 300 if (cs->typ != ISDN_CTYPE_TELESPCI) 301 301 return (0); 302 302 303 - if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) { 303 + if ((dev_tel = hisax_find_pci_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) { 304 304 if (pci_enable_device(dev_tel)) 305 305 return(0); 306 306 cs->irq = dev_tel->irq;
+1 -1
drivers/isdn/hisax/w6692.c
··· 1007 1007 return (0); 1008 1008 1009 1009 while (id_list[id_idx].vendor_id) { 1010 - dev_w6692 = pci_find_device(id_list[id_idx].vendor_id, 1010 + dev_w6692 = hisax_find_pci_device(id_list[id_idx].vendor_id, 1011 1011 id_list[id_idx].device_id, 1012 1012 dev_w6692); 1013 1013 if (dev_w6692) {
-11
drivers/pci/Kconfig
··· 21 21 22 22 If you don't know what to do here, say N. 23 23 24 - config PCI_LEGACY 25 - bool "Enable deprecated pci_find_* API" 26 - depends on PCI 27 - default y 28 - help 29 - Say Y here if you want to include support for the deprecated 30 - pci_find_device() API. Most drivers have been converted over 31 - to using the proper hotplug APIs, so this option serves to 32 - include/exclude only a few drivers that are still using this 33 - API. 34 - 35 24 config PCI_DEBUG 36 25 bool "PCI Debugging" 37 26 depends on PCI && DEBUG_KERNEL
+2 -3
drivers/pci/Makefile
··· 2 2 # Makefile for the PCI bus specific drivers. 3 3 # 4 4 5 - obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \ 5 + obj-y += access.o bus.o probe.o remove.o pci.o \ 6 6 pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ 7 7 irq.o 8 8 obj-$(CONFIG_PROC_FS) += proc.o 9 9 obj-$(CONFIG_SYSFS) += slot.o 10 10 11 - obj-$(CONFIG_PCI_LEGACY) += legacy.o 12 - CFLAGS_legacy.o += -Wno-deprecated-declarations 11 + obj-$(CONFIG_PCI_QUIRKS) += quirks.o 13 12 14 13 # Build PCI Express stuff if needed 15 14 obj-$(CONFIG_PCIEPORTBUS) += pcie/
+52 -4
drivers/pci/bus.c
··· 17 17 18 18 #include "pci.h" 19 19 20 + void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, 21 + unsigned int flags) 22 + { 23 + struct pci_bus_resource *bus_res; 24 + 25 + bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL); 26 + if (!bus_res) { 27 + dev_err(&bus->dev, "can't add %pR resource\n", res); 28 + return; 29 + } 30 + 31 + bus_res->res = res; 32 + bus_res->flags = flags; 33 + list_add_tail(&bus_res->list, &bus->resources); 34 + } 35 + 36 + struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) 37 + { 38 + struct pci_bus_resource *bus_res; 39 + 40 + if (n < PCI_BRIDGE_RESOURCE_NUM) 41 + return bus->resource[n]; 42 + 43 + n -= PCI_BRIDGE_RESOURCE_NUM; 44 + list_for_each_entry(bus_res, &bus->resources, list) { 45 + if (n-- == 0) 46 + return bus_res->res; 47 + } 48 + return NULL; 49 + } 50 + EXPORT_SYMBOL_GPL(pci_bus_resource_n); 51 + 52 + void pci_bus_remove_resources(struct pci_bus *bus) 53 + { 54 + struct pci_bus_resource *bus_res, *tmp; 55 + int i; 56 + 57 + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) 58 + bus->resource[i] = 0; 59 + 60 + list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { 61 + list_del(&bus_res->list); 62 + kfree(bus_res); 63 + } 64 + } 65 + 20 66 /** 21 67 * pci_bus_alloc_resource - allocate a resource from a parent bus 22 68 * @bus: PCI bus ··· 82 36 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, 83 37 resource_size_t size, resource_size_t align, 84 38 resource_size_t min, unsigned int type_mask, 85 - void (*alignf)(void *, struct resource *, resource_size_t, 86 - resource_size_t), 39 + resource_size_t (*alignf)(void *, 40 + const struct resource *, 41 + resource_size_t, 42 + resource_size_t), 87 43 void *alignf_data) 88 44 { 89 45 int i, ret = -ENOMEM; 46 + struct resource *r; 90 47 resource_size_t max = -1; 91 48 92 49 type_mask |= IORESOURCE_IO | IORESOURCE_MEM; ··· 98 49 if (!(res->flags & IORESOURCE_MEM_64)) 99 50 max = PCIBIOS_MAX_MEM_32; 100 51 101 - 
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 102 - struct resource *r = bus->resource[i]; 52 + pci_bus_for_each_resource(bus, r, i) { 103 53 if (!r) 104 54 continue; 105 55
-2
drivers/pci/hotplug/acpiphp_core.c
··· 332 332 slot->hotplug_slot->info->attention_status = 0; 333 333 slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot); 334 334 slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot); 335 - slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; 336 - slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; 337 335 338 336 acpiphp_slot->slot = slot; 339 337 snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
+1
drivers/pci/hotplug/cpcihp_generic.c
··· 162 162 dev = pci_get_slot(bus, PCI_DEVFN(bridge_slot, 0)); 163 163 if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { 164 164 err("Invalid bridge device %s", bridge); 165 + pci_dev_put(dev); 165 166 return -EINVAL; 166 167 } 167 168 bus = dev->subordinate;
-2
drivers/pci/hotplug/cpqphp.h
··· 310 310 u8 first_slot; 311 311 u8 add_support; 312 312 u8 push_flag; 313 - enum pci_bus_speed speed; 314 - enum pci_bus_speed speed_capability; 315 313 u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */ 316 314 u8 slot_switch_type; /* 0 = no switch, 1 = switch present */ 317 315 u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */
+17 -40
drivers/pci/hotplug/cpqphp_core.c
··· 583 583 return 0; 584 584 } 585 585 586 - static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 587 - { 588 - struct slot *slot = hotplug_slot->private; 589 - struct controller *ctrl = slot->ctrl; 590 - 591 - dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 592 - 593 - *value = ctrl->speed_capability; 594 - 595 - return 0; 596 - } 597 - 598 - static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 599 - { 600 - struct slot *slot = hotplug_slot->private; 601 - struct controller *ctrl = slot->ctrl; 602 - 603 - dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); 604 - 605 - *value = ctrl->speed; 606 - 607 - return 0; 608 - } 609 - 610 586 static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { 611 587 .set_attention_status = set_attention_status, 612 588 .enable_slot = process_SI, ··· 592 616 .get_attention_status = get_attention_status, 593 617 .get_latch_status = get_latch_status, 594 618 .get_adapter_status = get_adapter_status, 595 - .get_max_bus_speed = get_max_bus_speed, 596 - .get_cur_bus_speed = get_cur_bus_speed, 597 619 }; 598 620 599 621 #define SLOT_NAME_SIZE 10 ··· 603 629 struct slot *slot; 604 630 struct hotplug_slot *hotplug_slot; 605 631 struct hotplug_slot_info *hotplug_slot_info; 632 + struct pci_bus *bus = ctrl->pci_bus; 606 633 u8 number_of_slots; 607 634 u8 slot_device; 608 635 u8 slot_number; ··· 669 694 slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; 670 695 if (is_slot66mhz(slot)) 671 696 slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; 672 - if (ctrl->speed == PCI_SPEED_66MHz) 697 + if (bus->cur_bus_speed == PCI_SPEED_66MHz) 673 698 slot->capabilities |= PCISLOT_66_MHZ_OPERATION; 674 699 675 700 ctrl_slot = ··· 819 844 u32 rc; 820 845 struct controller *ctrl; 821 846 struct pci_func *func; 847 + struct pci_bus *bus; 822 848 int err; 823 849 824 850 err = pci_enable_device(pdev); ··· 828 852 pci_name(pdev), err); 829 853 return err; 830 854 } 
855 + bus = pdev->subordinate; 831 856 832 857 /* Need to read VID early b/c it's used to differentiate CPQ and INTC 833 858 * discovery ··· 906 929 pci_read_config_byte(pdev, 0x41, &bus_cap); 907 930 if (bus_cap & 0x80) { 908 931 dbg("bus max supports 133MHz PCI-X\n"); 909 - ctrl->speed_capability = PCI_SPEED_133MHz_PCIX; 932 + bus->max_bus_speed = PCI_SPEED_133MHz_PCIX; 910 933 break; 911 934 } 912 935 if (bus_cap & 0x40) { 913 936 dbg("bus max supports 100MHz PCI-X\n"); 914 - ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; 937 + bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; 915 938 break; 916 939 } 917 940 if (bus_cap & 20) { 918 941 dbg("bus max supports 66MHz PCI-X\n"); 919 - ctrl->speed_capability = PCI_SPEED_66MHz_PCIX; 942 + bus->max_bus_speed = PCI_SPEED_66MHz_PCIX; 920 943 break; 921 944 } 922 945 if (bus_cap & 10) { 923 946 dbg("bus max supports 66MHz PCI\n"); 924 - ctrl->speed_capability = PCI_SPEED_66MHz; 947 + bus->max_bus_speed = PCI_SPEED_66MHz; 925 948 break; 926 949 } 927 950 ··· 932 955 case PCI_SUB_HPC_ID: 933 956 /* Original 6500/7000 implementation */ 934 957 ctrl->slot_switch_type = 1; 935 - ctrl->speed_capability = PCI_SPEED_33MHz; 958 + bus->max_bus_speed = PCI_SPEED_33MHz; 936 959 ctrl->push_button = 0; 937 960 ctrl->pci_config_space = 1; 938 961 ctrl->defeature_PHP = 1; ··· 943 966 /* First Pushbutton implementation */ 944 967 ctrl->push_flag = 1; 945 968 ctrl->slot_switch_type = 1; 946 - ctrl->speed_capability = PCI_SPEED_33MHz; 969 + bus->max_bus_speed = PCI_SPEED_33MHz; 947 970 ctrl->push_button = 1; 948 971 ctrl->pci_config_space = 1; 949 972 ctrl->defeature_PHP = 1; ··· 953 976 case PCI_SUB_HPC_ID_INTC: 954 977 /* Third party (6500/7000) */ 955 978 ctrl->slot_switch_type = 1; 956 - ctrl->speed_capability = PCI_SPEED_33MHz; 979 + bus->max_bus_speed = PCI_SPEED_33MHz; 957 980 ctrl->push_button = 0; 958 981 ctrl->pci_config_space = 1; 959 982 ctrl->defeature_PHP = 1; ··· 964 987 /* First 66 Mhz implementation */ 965 988 ctrl->push_flag = 
1; 966 989 ctrl->slot_switch_type = 1; 967 - ctrl->speed_capability = PCI_SPEED_66MHz; 990 + bus->max_bus_speed = PCI_SPEED_66MHz; 968 991 ctrl->push_button = 1; 969 992 ctrl->pci_config_space = 1; 970 993 ctrl->defeature_PHP = 1; ··· 975 998 /* First PCI-X implementation, 100MHz */ 976 999 ctrl->push_flag = 1; 977 1000 ctrl->slot_switch_type = 1; 978 - ctrl->speed_capability = PCI_SPEED_100MHz_PCIX; 1001 + bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; 979 1002 ctrl->push_button = 1; 980 1003 ctrl->pci_config_space = 1; 981 1004 ctrl->defeature_PHP = 1; ··· 992 1015 case PCI_VENDOR_ID_INTEL: 993 1016 /* Check for speed capability (0=33, 1=66) */ 994 1017 if (subsystem_deviceid & 0x0001) 995 - ctrl->speed_capability = PCI_SPEED_66MHz; 1018 + bus->max_bus_speed = PCI_SPEED_66MHz; 996 1019 else 997 - ctrl->speed_capability = PCI_SPEED_33MHz; 1020 + bus->max_bus_speed = PCI_SPEED_33MHz; 998 1021 999 1022 /* Check for push button */ 1000 1023 if (subsystem_deviceid & 0x0002) ··· 1056 1079 pdev->bus->number); 1057 1080 1058 1081 dbg("Hotplug controller capabilities:\n"); 1059 - dbg(" speed_capability %d\n", ctrl->speed_capability); 1082 + dbg(" speed_capability %d\n", bus->max_bus_speed); 1060 1083 dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? 1061 1084 "switch present" : "no switch"); 1062 1085 dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? ··· 1119 1142 } 1120 1143 1121 1144 /* Check for 66Mhz operation */ 1122 - ctrl->speed = get_controller_speed(ctrl); 1145 + bus->cur_bus_speed = get_controller_speed(ctrl); 1123 1146 1124 1147 1125 1148 /********************************************************
+15 -12
drivers/pci/hotplug/cpqphp_ctrl.c
··· 1130 1130 static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot) 1131 1131 { 1132 1132 struct slot *slot; 1133 + struct pci_bus *bus = ctrl->pci_bus; 1133 1134 u8 reg; 1134 1135 u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER); 1135 1136 u16 reg16; 1136 1137 u32 leds = readl(ctrl->hpc_reg + LED_CONTROL); 1137 1138 1138 - if (ctrl->speed == adapter_speed) 1139 + if (bus->cur_bus_speed == adapter_speed) 1139 1140 return 0; 1140 1141 1141 1142 /* We don't allow freq/mode changes if we find another adapter running ··· 1153 1152 * lower speed/mode, we allow the new adapter to function at 1154 1153 * this rate if supported 1155 1154 */ 1156 - if (ctrl->speed < adapter_speed) 1155 + if (bus->cur_bus_speed < adapter_speed) 1157 1156 return 0; 1158 1157 1159 1158 return 1; ··· 1162 1161 /* If the controller doesn't support freq/mode changes and the 1163 1162 * controller is running at a higher mode, we bail 1164 1163 */ 1165 - if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability)) 1164 + if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability)) 1166 1165 return 1; 1167 1166 1168 1167 /* But we allow the adapter to run at a lower rate if possible */ 1169 - if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability)) 1168 + if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability)) 1170 1169 return 0; 1171 1170 1172 1171 /* We try to set the max speed supported by both the adapter and 1173 1172 * controller 1174 1173 */ 1175 - if (ctrl->speed_capability < adapter_speed) { 1176 - if (ctrl->speed == ctrl->speed_capability) 1174 + if (bus->max_bus_speed < adapter_speed) { 1175 + if (bus->cur_bus_speed == bus->max_bus_speed) 1177 1176 return 0; 1178 - adapter_speed = ctrl->speed_capability; 1177 + adapter_speed = bus->max_bus_speed; 1179 1178 } 1180 1179 1181 1180 writel(0x0L, ctrl->hpc_reg + LED_CONTROL); ··· 1230 1229 pci_write_config_byte(ctrl->pci_dev, 0x43, reg); 1231 1230 1232 1231 
/* Only if mode change...*/ 1233 - if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || 1234 - ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 1232 + if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || 1233 + ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) 1235 1234 set_SOGO(ctrl); 1236 1235 1237 1236 wait_for_ctrl_irq(ctrl); ··· 1244 1243 set_SOGO(ctrl); 1245 1244 wait_for_ctrl_irq(ctrl); 1246 1245 1247 - ctrl->speed = adapter_speed; 1246 + bus->cur_bus_speed = adapter_speed; 1248 1247 slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 1249 1248 1250 1249 info("Successfully changed frequency/mode for adapter in slot %d\n", ··· 1270 1269 */ 1271 1270 static u32 board_replaced(struct pci_func *func, struct controller *ctrl) 1272 1271 { 1272 + struct pci_bus *bus = ctrl->pci_bus; 1273 1273 u8 hp_slot; 1274 1274 u8 temp_byte; 1275 1275 u8 adapter_speed; ··· 1311 1309 wait_for_ctrl_irq (ctrl); 1312 1310 1313 1311 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1314 - if (ctrl->speed != adapter_speed) 1312 + if (bus->cur_bus_speed != adapter_speed) 1315 1313 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1316 1314 rc = WRONG_BUS_FREQUENCY; 1317 1315 ··· 1428 1426 u32 temp_register = 0xFFFFFFFF; 1429 1427 u32 rc = 0; 1430 1428 struct pci_func *new_slot = NULL; 1429 + struct pci_bus *bus = ctrl->pci_bus; 1431 1430 struct slot *p_slot; 1432 1431 struct resource_lists res_lists; 1433 1432 ··· 1459 1456 wait_for_ctrl_irq (ctrl); 1460 1457 1461 1458 adapter_speed = get_adapter_speed(ctrl, hp_slot); 1462 - if (ctrl->speed != adapter_speed) 1459 + if (bus->cur_bus_speed != adapter_speed) 1463 1460 if (set_controller_speed(ctrl, adapter_speed, hp_slot)) 1464 1461 rc = WRONG_BUS_FREQUENCY; 1465 1462
+28 -78
drivers/pci/hotplug/ibmphp_core.c
··· 395 395 return rc; 396 396 } 397 397 398 - static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 398 + static int get_max_bus_speed(struct slot *slot) 399 399 { 400 - int rc = -ENODEV; 401 - struct slot *pslot; 400 + int rc; 402 401 u8 mode = 0; 402 + enum pci_bus_speed speed; 403 + struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus; 403 404 404 - debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, 405 - hotplug_slot, value); 405 + debug("%s - Entry slot[%p]\n", __func__, slot); 406 406 407 407 ibmphp_lock_operations(); 408 + mode = slot->supported_bus_mode; 409 + speed = slot->supported_speed; 410 + ibmphp_unlock_operations(); 408 411 409 - if (hotplug_slot) { 410 - pslot = hotplug_slot->private; 411 - if (pslot) { 412 - rc = 0; 413 - mode = pslot->supported_bus_mode; 414 - *value = pslot->supported_speed; 415 - switch (*value) { 416 - case BUS_SPEED_33: 417 - break; 418 - case BUS_SPEED_66: 419 - if (mode == BUS_MODE_PCIX) 420 - *value += 0x01; 421 - break; 422 - case BUS_SPEED_100: 423 - case BUS_SPEED_133: 424 - *value = pslot->supported_speed + 0x01; 425 - break; 426 - default: 427 - /* Note (will need to change): there would be soon 256, 512 also */ 428 - rc = -ENODEV; 429 - } 430 - } 412 + switch (speed) { 413 + case BUS_SPEED_33: 414 + break; 415 + case BUS_SPEED_66: 416 + if (mode == BUS_MODE_PCIX) 417 + speed += 0x01; 418 + break; 419 + case BUS_SPEED_100: 420 + case BUS_SPEED_133: 421 + speed += 0x01; 422 + break; 423 + default: 424 + /* Note (will need to change): there would be soon 256, 512 also */ 425 + rc = -ENODEV; 431 426 } 432 427 433 - ibmphp_unlock_operations(); 434 - debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); 435 - return rc; 436 - } 428 + if (!rc) 429 + bus->max_bus_speed = speed; 437 430 438 - static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 439 - { 440 - int rc = -ENODEV; 441 - struct slot *pslot; 442 - u8 mode = 0; 443 - 444 - 
debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__, 445 - hotplug_slot, value); 446 - 447 - ibmphp_lock_operations(); 448 - 449 - if (hotplug_slot) { 450 - pslot = hotplug_slot->private; 451 - if (pslot) { 452 - rc = get_cur_bus_info(&pslot); 453 - if (!rc) { 454 - mode = pslot->bus_on->current_bus_mode; 455 - *value = pslot->bus_on->current_speed; 456 - switch (*value) { 457 - case BUS_SPEED_33: 458 - break; 459 - case BUS_SPEED_66: 460 - if (mode == BUS_MODE_PCIX) 461 - *value += 0x01; 462 - else if (mode == BUS_MODE_PCI) 463 - ; 464 - else 465 - *value = PCI_SPEED_UNKNOWN; 466 - break; 467 - case BUS_SPEED_100: 468 - case BUS_SPEED_133: 469 - *value += 0x01; 470 - break; 471 - default: 472 - /* Note of change: there would also be 256, 512 soon */ 473 - rc = -ENODEV; 474 - } 475 - } 476 - } 477 - } 478 - 479 - ibmphp_unlock_operations(); 480 - debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value); 431 + debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed); 481 432 return rc; 482 433 } 483 434 ··· 523 572 if (slot_cur->bus_on->current_speed == 0xFF) 524 573 if (get_cur_bus_info(&slot_cur)) 525 574 return -1; 575 + get_max_bus_speed(slot_cur); 526 576 527 577 if (slot_cur->ctrl->options == 0xFF) 528 578 if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) ··· 607 655 int ibmphp_update_slot_info(struct slot *slot_cur) 608 656 { 609 657 struct hotplug_slot_info *info; 658 + struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus; 610 659 int rc; 611 660 u8 bus_speed; 612 661 u8 mode; ··· 653 700 bus_speed = PCI_SPEED_UNKNOWN; 654 701 } 655 702 656 - info->cur_bus_speed = bus_speed; 657 - info->max_bus_speed = slot_cur->hotplug_slot->info->max_bus_speed; 703 + bus->cur_bus_speed = bus_speed; 658 704 // To do: bus_names 659 705 660 706 rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); ··· 1278 1326 .get_attention_status = get_attention_status, 1279 1327 .get_latch_status = get_latch_status, 1280 1328 .get_adapter_status = 
get_adapter_present, 1281 - .get_max_bus_speed = get_max_bus_speed, 1282 - .get_cur_bus_speed = get_cur_bus_speed, 1283 1329 /* .get_max_adapter_speed = get_max_adapter_speed, 1284 1330 .get_bus_name_status = get_bus_name, 1285 1331 */
+11 -2
drivers/pci/hotplug/ibmphp_ebda.c
··· 245 245 246 246 int __init ibmphp_access_ebda (void) 247 247 { 248 - u8 format, num_ctlrs, rio_complete, hs_complete; 248 + u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz; 249 249 u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base; 250 250 int rc = 0; 251 251 ··· 260 260 iounmap (io_mem); 261 261 debug ("returned ebda segment: %x\n", ebda_seg); 262 262 263 - io_mem = ioremap(ebda_seg<<4, 1024); 263 + io_mem = ioremap(ebda_seg<<4, 1); 264 + if (!io_mem) 265 + return -ENOMEM; 266 + ebda_sz = readb(io_mem); 267 + iounmap(io_mem); 268 + debug("ebda size: %d(KiB)\n", ebda_sz); 269 + if (ebda_sz == 0) 270 + return -ENOMEM; 271 + 272 + io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024)); 264 273 if (!io_mem ) 265 274 return -ENOMEM; 266 275 next_offset = 0x180;
+1
drivers/pci/hotplug/ibmphp_hpc.c
··· 35 35 #include <linux/init.h> 36 36 #include <linux/mutex.h> 37 37 #include <linux/sched.h> 38 + #include <linux/semaphore.h> 38 39 #include <linux/kthread.h> 39 40 #include "ibmphp.h" 40 41
-132
drivers/pci/hotplug/pci_hotplug_core.c
··· 64 64 static LIST_HEAD(pci_hotplug_slot_list); 65 65 static DEFINE_MUTEX(pci_hp_mutex); 66 66 67 - /* these strings match up with the values in pci_bus_speed */ 68 - static char *pci_bus_speed_strings[] = { 69 - "33 MHz PCI", /* 0x00 */ 70 - "66 MHz PCI", /* 0x01 */ 71 - "66 MHz PCI-X", /* 0x02 */ 72 - "100 MHz PCI-X", /* 0x03 */ 73 - "133 MHz PCI-X", /* 0x04 */ 74 - NULL, /* 0x05 */ 75 - NULL, /* 0x06 */ 76 - NULL, /* 0x07 */ 77 - NULL, /* 0x08 */ 78 - "66 MHz PCI-X 266", /* 0x09 */ 79 - "100 MHz PCI-X 266", /* 0x0a */ 80 - "133 MHz PCI-X 266", /* 0x0b */ 81 - NULL, /* 0x0c */ 82 - NULL, /* 0x0d */ 83 - NULL, /* 0x0e */ 84 - NULL, /* 0x0f */ 85 - NULL, /* 0x10 */ 86 - "66 MHz PCI-X 533", /* 0x11 */ 87 - "100 MHz PCI-X 533", /* 0x12 */ 88 - "133 MHz PCI-X 533", /* 0x13 */ 89 - "2.5 GT/s PCIe", /* 0x14 */ 90 - "5.0 GT/s PCIe", /* 0x15 */ 91 - }; 92 - 93 67 #ifdef CONFIG_HOTPLUG_PCI_CPCI 94 68 extern int cpci_hotplug_init(int debug); 95 69 extern void cpci_hotplug_exit(void); ··· 92 118 GET_STATUS(attention_status, u8) 93 119 GET_STATUS(latch_status, u8) 94 120 GET_STATUS(adapter_status, u8) 95 - GET_STATUS(max_bus_speed, enum pci_bus_speed) 96 - GET_STATUS(cur_bus_speed, enum pci_bus_speed) 97 121 98 122 static ssize_t power_read_file(struct pci_slot *slot, char *buf) 99 123 { ··· 235 263 .show = presence_read_file, 236 264 }; 237 265 238 - static char *unknown_speed = "Unknown bus speed"; 239 - 240 - static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf) 241 - { 242 - char *speed_string; 243 - int retval; 244 - enum pci_bus_speed value; 245 - 246 - retval = get_max_bus_speed(slot->hotplug, &value); 247 - if (retval) 248 - goto exit; 249 - 250 - if (value == PCI_SPEED_UNKNOWN) 251 - speed_string = unknown_speed; 252 - else 253 - speed_string = pci_bus_speed_strings[value]; 254 - 255 - retval = sprintf (buf, "%s\n", speed_string); 256 - 257 - exit: 258 - return retval; 259 - } 260 - 261 - static struct pci_slot_attribute 
hotplug_slot_attr_max_bus_speed = { 262 - .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, 263 - .show = max_bus_speed_read_file, 264 - }; 265 - 266 - static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf) 267 - { 268 - char *speed_string; 269 - int retval; 270 - enum pci_bus_speed value; 271 - 272 - retval = get_cur_bus_speed(slot->hotplug, &value); 273 - if (retval) 274 - goto exit; 275 - 276 - if (value == PCI_SPEED_UNKNOWN) 277 - speed_string = unknown_speed; 278 - else 279 - speed_string = pci_bus_speed_strings[value]; 280 - 281 - retval = sprintf (buf, "%s\n", speed_string); 282 - 283 - exit: 284 - return retval; 285 - } 286 - 287 - static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = { 288 - .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, 289 - .show = cur_bus_speed_read_file, 290 - }; 291 - 292 266 static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, 293 267 size_t count) 294 268 { ··· 309 391 return false; 310 392 } 311 393 312 - static bool has_max_bus_speed_file(struct pci_slot *pci_slot) 313 - { 314 - struct hotplug_slot *slot = pci_slot->hotplug; 315 - if ((!slot) || (!slot->ops)) 316 - return false; 317 - if (slot->ops->get_max_bus_speed) 318 - return true; 319 - return false; 320 - } 321 - 322 - static bool has_cur_bus_speed_file(struct pci_slot *pci_slot) 323 - { 324 - struct hotplug_slot *slot = pci_slot->hotplug; 325 - if ((!slot) || (!slot->ops)) 326 - return false; 327 - if (slot->ops->get_cur_bus_speed) 328 - return true; 329 - return false; 330 - } 331 - 332 394 static bool has_test_file(struct pci_slot *pci_slot) 333 395 { 334 396 struct hotplug_slot *slot = pci_slot->hotplug; ··· 354 456 goto exit_adapter; 355 457 } 356 458 357 - if (has_max_bus_speed_file(slot)) { 358 - retval = sysfs_create_file(&slot->kobj, 359 - &hotplug_slot_attr_max_bus_speed.attr); 360 - if (retval) 361 - goto exit_max_speed; 362 - } 363 - 364 - if (has_cur_bus_speed_file(slot)) { 365 - 
retval = sysfs_create_file(&slot->kobj, 366 - &hotplug_slot_attr_cur_bus_speed.attr); 367 - if (retval) 368 - goto exit_cur_speed; 369 - } 370 - 371 459 if (has_test_file(slot)) { 372 460 retval = sysfs_create_file(&slot->kobj, 373 461 &hotplug_slot_attr_test.attr); ··· 364 480 goto exit; 365 481 366 482 exit_test: 367 - if (has_cur_bus_speed_file(slot)) 368 - sysfs_remove_file(&slot->kobj, 369 - &hotplug_slot_attr_cur_bus_speed.attr); 370 - exit_cur_speed: 371 - if (has_max_bus_speed_file(slot)) 372 - sysfs_remove_file(&slot->kobj, 373 - &hotplug_slot_attr_max_bus_speed.attr); 374 - exit_max_speed: 375 483 if (has_adapter_file(slot)) 376 484 sysfs_remove_file(&slot->kobj, 377 485 &hotplug_slot_attr_presence.attr); ··· 398 522 if (has_adapter_file(slot)) 399 523 sysfs_remove_file(&slot->kobj, 400 524 &hotplug_slot_attr_presence.attr); 401 - 402 - if (has_max_bus_speed_file(slot)) 403 - sysfs_remove_file(&slot->kobj, 404 - &hotplug_slot_attr_max_bus_speed.attr); 405 - 406 - if (has_cur_bus_speed_file(slot)) 407 - sysfs_remove_file(&slot->kobj, 408 - &hotplug_slot_attr_cur_bus_speed.attr); 409 525 410 526 if (has_test_file(slot)) 411 527 sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
-25
drivers/pci/hotplug/pciehp_core.c
··· 69 69 static int get_attention_status (struct hotplug_slot *slot, u8 *value); 70 70 static int get_latch_status (struct hotplug_slot *slot, u8 *value); 71 71 static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 72 - static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 73 - static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 74 72 75 73 /** 76 74 * release_slot - free up the memory used by a slot ··· 111 113 ops->disable_slot = disable_slot; 112 114 ops->get_power_status = get_power_status; 113 115 ops->get_adapter_status = get_adapter_status; 114 - ops->get_max_bus_speed = get_max_bus_speed; 115 - ops->get_cur_bus_speed = get_cur_bus_speed; 116 116 if (MRL_SENS(ctrl)) 117 117 ops->get_latch_status = get_latch_status; 118 118 if (ATTN_LED(ctrl)) { ··· 221 225 __func__, slot_name(slot)); 222 226 223 227 return pciehp_get_adapter_status(slot, value); 224 - } 225 - 226 - static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, 227 - enum pci_bus_speed *value) 228 - { 229 - struct slot *slot = hotplug_slot->private; 230 - 231 - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 232 - __func__, slot_name(slot)); 233 - 234 - return pciehp_get_max_link_speed(slot, value); 235 - } 236 - 237 - static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 238 - { 239 - struct slot *slot = hotplug_slot->private; 240 - 241 - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 242 - __func__, slot_name(slot)); 243 - 244 - return pciehp_get_cur_link_speed(slot, value); 245 228 } 246 229 247 230 static int pciehp_probe(struct pcie_device *dev)
+1
drivers/pci/hotplug/pciehp_ctrl.c
··· 341 341 p_slot->state = POWERON_STATE; 342 342 break; 343 343 default: 344 + kfree(info); 344 345 goto out; 345 346 } 346 347 queue_work(pciehp_wq, &info->work);
+9 -63
drivers/pci/hotplug/pciehp_hpc.c
··· 492 492 u16 slot_cmd; 493 493 u16 cmd_mask; 494 494 u16 slot_status; 495 + u16 lnk_status; 495 496 int retval = 0; 496 497 497 498 /* Clear sticky power-fault bit from previous power failures */ ··· 523 522 } 524 523 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 525 524 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 525 + 526 + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 527 + if (retval) { 528 + ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n", 529 + __func__); 530 + return retval; 531 + } 532 + pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); 526 533 527 534 return retval; 528 535 } ··· 619 610 return IRQ_HANDLED; 620 611 } 621 612 622 - int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value) 623 - { 624 - struct controller *ctrl = slot->ctrl; 625 - enum pcie_link_speed lnk_speed; 626 - u32 lnk_cap; 627 - int retval = 0; 628 - 629 - retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap); 630 - if (retval) { 631 - ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); 632 - return retval; 633 - } 634 - 635 - switch (lnk_cap & 0x000F) { 636 - case 1: 637 - lnk_speed = PCIE_2_5GB; 638 - break; 639 - case 2: 640 - lnk_speed = PCIE_5_0GB; 641 - break; 642 - default: 643 - lnk_speed = PCIE_LNK_SPEED_UNKNOWN; 644 - break; 645 - } 646 - 647 - *value = lnk_speed; 648 - ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed); 649 - 650 - return retval; 651 - } 652 - 653 613 int pciehp_get_max_lnk_width(struct slot *slot, 654 614 enum pcie_link_width *value) 655 615 { ··· 665 687 666 688 *value = lnk_wdth; 667 689 ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth); 668 - 669 - return retval; 670 - } 671 - 672 - int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value) 673 - { 674 - struct controller *ctrl = slot->ctrl; 675 - enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; 676 - int retval = 0; 677 - u16 lnk_status; 678 - 679 - retval = pciehp_readw(ctrl, 
PCI_EXP_LNKSTA, &lnk_status); 680 - if (retval) { 681 - ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", 682 - __func__); 683 - return retval; 684 - } 685 - 686 - switch (lnk_status & PCI_EXP_LNKSTA_CLS) { 687 - case 1: 688 - lnk_speed = PCIE_2_5GB; 689 - break; 690 - case 2: 691 - lnk_speed = PCIE_5_0GB; 692 - break; 693 - default: 694 - lnk_speed = PCIE_LNK_SPEED_UNKNOWN; 695 - break; 696 - } 697 - 698 - *value = lnk_speed; 699 - ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed); 700 690 701 691 return retval; 702 692 }
+17 -6
drivers/pci/hotplug/pciehp_pci.c
··· 53 53 busnr = pci_scan_bridge(parent, dev, busnr, pass); 54 54 if (!dev->subordinate) 55 55 return -1; 56 - pci_bus_size_bridges(dev->subordinate); 57 - pci_bus_assign_resources(parent); 58 - pci_enable_bridges(parent); 59 - pci_bus_add_devices(parent); 56 + 60 57 return 0; 61 58 } 62 59 63 60 int pciehp_configure_device(struct slot *p_slot) 64 61 { 65 62 struct pci_dev *dev; 66 - struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; 63 + struct pci_dev *bridge = p_slot->ctrl->pcie->port; 64 + struct pci_bus *parent = bridge->subordinate; 67 65 int num, fn; 68 66 struct controller *ctrl = p_slot->ctrl; 69 67 ··· 94 96 (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { 95 97 pciehp_add_bridge(dev); 96 98 } 99 + pci_dev_put(dev); 100 + } 101 + 102 + pci_assign_unassigned_bridge_resources(bridge); 103 + 104 + for (fn = 0; fn < 8; fn++) { 105 + dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); 106 + if (!dev) 107 + continue; 108 + if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { 109 + pci_dev_put(dev); 110 + continue; 111 + } 97 112 pci_configure_slot(dev); 98 113 pci_dev_put(dev); 99 114 } 100 115 101 - pci_bus_assign_resources(parent); 102 116 pci_bus_add_devices(parent); 117 + 103 118 return 0; 104 119 } 105 120
+12 -12
drivers/pci/hotplug/rpaphp_core.c
··· 130 130 return 0; 131 131 } 132 132 133 - static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 133 + static enum pci_bus_speed get_max_bus_speed(struct slot *slot) 134 134 { 135 - struct slot *slot = (struct slot *)hotplug_slot->private; 136 - 135 + enum pci_bus_speed speed; 137 136 switch (slot->type) { 138 137 case 1: 139 138 case 2: ··· 140 141 case 4: 141 142 case 5: 142 143 case 6: 143 - *value = PCI_SPEED_33MHz; /* speed for case 1-6 */ 144 + speed = PCI_SPEED_33MHz; /* speed for case 1-6 */ 144 145 break; 145 146 case 7: 146 147 case 8: 147 - *value = PCI_SPEED_66MHz; 148 + speed = PCI_SPEED_66MHz; 148 149 break; 149 150 case 11: 150 151 case 14: 151 - *value = PCI_SPEED_66MHz_PCIX; 152 + speed = PCI_SPEED_66MHz_PCIX; 152 153 break; 153 154 case 12: 154 155 case 15: 155 - *value = PCI_SPEED_100MHz_PCIX; 156 + speed = PCI_SPEED_100MHz_PCIX; 156 157 break; 157 158 case 13: 158 159 case 16: 159 - *value = PCI_SPEED_133MHz_PCIX; 160 + speed = PCI_SPEED_133MHz_PCIX; 160 161 break; 161 162 default: 162 - *value = PCI_SPEED_UNKNOWN; 163 + speed = PCI_SPEED_UNKNOWN; 163 164 break; 164 - 165 165 } 166 - return 0; 166 + 167 + return speed; 167 168 } 168 169 169 170 static int get_children_props(struct device_node *dn, const int **drc_indexes, ··· 407 408 slot->state = NOT_VALID; 408 409 return -EINVAL; 409 410 } 411 + 412 + slot->bus->max_bus_speed = get_max_bus_speed(slot); 410 413 return 0; 411 414 } 412 415 ··· 430 429 .get_power_status = get_power_status, 431 430 .get_attention_status = get_attention_status, 432 431 .get_adapter_status = get_adapter_status, 433 - .get_max_bus_speed = get_max_bus_speed, 434 432 }; 435 433 436 434 module_init(rpaphp_init);
-2
drivers/pci/hotplug/shpchp.h
··· 333 333 int (*set_attention_status)(struct slot *slot, u8 status); 334 334 int (*get_latch_status)(struct slot *slot, u8 *status); 335 335 int (*get_adapter_status)(struct slot *slot, u8 *status); 336 - int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 337 - int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed); 338 336 int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed); 339 337 int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode); 340 338 int (*get_prog_int)(struct slot *slot, u8 *prog_int);
-35
drivers/pci/hotplug/shpchp_core.c
··· 65 65 static int get_attention_status (struct hotplug_slot *slot, u8 *value); 66 66 static int get_latch_status (struct hotplug_slot *slot, u8 *value); 67 67 static int get_adapter_status (struct hotplug_slot *slot, u8 *value); 68 - static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 69 - static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 70 68 71 69 static struct hotplug_slot_ops shpchp_hotplug_slot_ops = { 72 70 .set_attention_status = set_attention_status, ··· 74 76 .get_attention_status = get_attention_status, 75 77 .get_latch_status = get_latch_status, 76 78 .get_adapter_status = get_adapter_status, 77 - .get_max_bus_speed = get_max_bus_speed, 78 - .get_cur_bus_speed = get_cur_bus_speed, 79 79 }; 80 80 81 81 /** ··· 271 275 retval = slot->hpc_ops->get_adapter_status(slot, value); 272 276 if (retval < 0) 273 277 *value = hotplug_slot->info->adapter_status; 274 - 275 - return 0; 276 - } 277 - 278 - static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, 279 - enum pci_bus_speed *value) 280 - { 281 - struct slot *slot = get_slot(hotplug_slot); 282 - int retval; 283 - 284 - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 285 - __func__, slot_name(slot)); 286 - 287 - retval = slot->hpc_ops->get_max_bus_speed(slot, value); 288 - if (retval < 0) 289 - *value = PCI_SPEED_UNKNOWN; 290 - 291 - return 0; 292 - } 293 - 294 - static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 295 - { 296 - struct slot *slot = get_slot(hotplug_slot); 297 - int retval; 298 - 299 - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 300 - __func__, slot_name(slot)); 301 - 302 - retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 303 - if (retval < 0) 304 - *value = PCI_SPEED_UNKNOWN; 305 278 306 279 return 0; 307 280 }
+3 -11
drivers/pci/hotplug/shpchp_ctrl.c
··· 285 285 return WRONG_BUS_FREQUENCY; 286 286 } 287 287 288 - rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); 289 - if (rc) { 290 - ctrl_err(ctrl, "Can't get bus operation speed\n"); 291 - return WRONG_BUS_FREQUENCY; 292 - } 293 - 294 - rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); 295 - if (rc) { 296 - ctrl_err(ctrl, "Can't get max bus operation speed\n"); 297 - msp = bsp; 298 - } 288 + bsp = ctrl->pci_dev->bus->cur_bus_speed; 289 + msp = ctrl->pci_dev->bus->max_bus_speed; 299 290 300 291 /* Check if there are other slots or devices on the same bus */ 301 292 if (!list_empty(&ctrl->pci_dev->subordinate->devices)) ··· 453 462 p_slot->state = POWERON_STATE; 454 463 break; 455 464 default: 465 + kfree(info); 456 466 goto out; 457 467 } 458 468 queue_work(shpchp_wq, &info->work);
+77 -72
drivers/pci/hotplug/shpchp_hpc.c
··· 660 660 return retval; 661 661 } 662 662 663 + static int shpc_get_cur_bus_speed(struct controller *ctrl) 664 + { 665 + int retval = 0; 666 + struct pci_bus *bus = ctrl->pci_dev->subordinate; 667 + enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; 668 + u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); 669 + u8 pi = shpc_readb(ctrl, PROG_INTERFACE); 670 + u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); 671 + 672 + if ((pi == 1) && (speed_mode > 4)) { 673 + retval = -ENODEV; 674 + goto out; 675 + } 676 + 677 + switch (speed_mode) { 678 + case 0x0: 679 + bus_speed = PCI_SPEED_33MHz; 680 + break; 681 + case 0x1: 682 + bus_speed = PCI_SPEED_66MHz; 683 + break; 684 + case 0x2: 685 + bus_speed = PCI_SPEED_66MHz_PCIX; 686 + break; 687 + case 0x3: 688 + bus_speed = PCI_SPEED_100MHz_PCIX; 689 + break; 690 + case 0x4: 691 + bus_speed = PCI_SPEED_133MHz_PCIX; 692 + break; 693 + case 0x5: 694 + bus_speed = PCI_SPEED_66MHz_PCIX_ECC; 695 + break; 696 + case 0x6: 697 + bus_speed = PCI_SPEED_100MHz_PCIX_ECC; 698 + break; 699 + case 0x7: 700 + bus_speed = PCI_SPEED_133MHz_PCIX_ECC; 701 + break; 702 + case 0x8: 703 + bus_speed = PCI_SPEED_66MHz_PCIX_266; 704 + break; 705 + case 0x9: 706 + bus_speed = PCI_SPEED_100MHz_PCIX_266; 707 + break; 708 + case 0xa: 709 + bus_speed = PCI_SPEED_133MHz_PCIX_266; 710 + break; 711 + case 0xb: 712 + bus_speed = PCI_SPEED_66MHz_PCIX_533; 713 + break; 714 + case 0xc: 715 + bus_speed = PCI_SPEED_100MHz_PCIX_533; 716 + break; 717 + case 0xd: 718 + bus_speed = PCI_SPEED_133MHz_PCIX_533; 719 + break; 720 + default: 721 + retval = -ENODEV; 722 + break; 723 + } 724 + 725 + out: 726 + bus->cur_bus_speed = bus_speed; 727 + dbg("Current bus speed = %d\n", bus_speed); 728 + return retval; 729 + } 730 + 731 + 663 732 static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) 664 733 { 665 734 int retval; ··· 789 720 retval = shpc_write_cmd(slot, 0, cmd); 790 721 if (retval) 791 722 ctrl_err(ctrl, "%s: Write 
command failed!\n", __func__); 723 + else 724 + shpc_get_cur_bus_speed(ctrl); 792 725 793 726 return retval; 794 727 } ··· 874 803 return IRQ_HANDLED; 875 804 } 876 805 877 - static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) 806 + static int shpc_get_max_bus_speed(struct controller *ctrl) 878 807 { 879 808 int retval = 0; 880 - struct controller *ctrl = slot->ctrl; 809 + struct pci_bus *bus = ctrl->pci_dev->subordinate; 881 810 enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; 882 811 u8 pi = shpc_readb(ctrl, PROG_INTERFACE); 883 812 u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); ··· 913 842 retval = -ENODEV; 914 843 } 915 844 916 - *value = bus_speed; 845 + bus->max_bus_speed = bus_speed; 917 846 ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); 918 847 919 - return retval; 920 - } 921 - 922 - static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value) 923 - { 924 - int retval = 0; 925 - struct controller *ctrl = slot->ctrl; 926 - enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN; 927 - u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG); 928 - u8 pi = shpc_readb(ctrl, PROG_INTERFACE); 929 - u8 speed_mode = (pi == 2) ? 
(sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); 930 - 931 - if ((pi == 1) && (speed_mode > 4)) { 932 - *value = PCI_SPEED_UNKNOWN; 933 - return -ENODEV; 934 - } 935 - 936 - switch (speed_mode) { 937 - case 0x0: 938 - *value = PCI_SPEED_33MHz; 939 - break; 940 - case 0x1: 941 - *value = PCI_SPEED_66MHz; 942 - break; 943 - case 0x2: 944 - *value = PCI_SPEED_66MHz_PCIX; 945 - break; 946 - case 0x3: 947 - *value = PCI_SPEED_100MHz_PCIX; 948 - break; 949 - case 0x4: 950 - *value = PCI_SPEED_133MHz_PCIX; 951 - break; 952 - case 0x5: 953 - *value = PCI_SPEED_66MHz_PCIX_ECC; 954 - break; 955 - case 0x6: 956 - *value = PCI_SPEED_100MHz_PCIX_ECC; 957 - break; 958 - case 0x7: 959 - *value = PCI_SPEED_133MHz_PCIX_ECC; 960 - break; 961 - case 0x8: 962 - *value = PCI_SPEED_66MHz_PCIX_266; 963 - break; 964 - case 0x9: 965 - *value = PCI_SPEED_100MHz_PCIX_266; 966 - break; 967 - case 0xa: 968 - *value = PCI_SPEED_133MHz_PCIX_266; 969 - break; 970 - case 0xb: 971 - *value = PCI_SPEED_66MHz_PCIX_533; 972 - break; 973 - case 0xc: 974 - *value = PCI_SPEED_100MHz_PCIX_533; 975 - break; 976 - case 0xd: 977 - *value = PCI_SPEED_133MHz_PCIX_533; 978 - break; 979 - default: 980 - *value = PCI_SPEED_UNKNOWN; 981 - retval = -ENODEV; 982 - break; 983 - } 984 - 985 - ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed); 986 848 return retval; 987 849 } 988 850 ··· 930 926 .get_latch_status = hpc_get_latch_status, 931 927 .get_adapter_status = hpc_get_adapter_status, 932 928 933 - .get_max_bus_speed = hpc_get_max_bus_speed, 934 - .get_cur_bus_speed = hpc_get_cur_bus_speed, 935 929 .get_adapter_speed = hpc_get_adapter_speed, 936 930 .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap, 937 931 .get_prog_int = hpc_get_prog_int, ··· 1087 1085 } 1088 1086 } 1089 1087 ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); 1088 + 1089 + shpc_get_max_bus_speed(ctrl); 1090 + shpc_get_cur_bus_speed(ctrl); 1090 1091 1091 1092 /* 1092 1093 * If this is the first controller to be initialized,
+3 -6
drivers/pci/hotplug/shpchp_sysfs.c
··· 47 47 bus = pdev->subordinate; 48 48 49 49 out += sprintf(buf, "Free resources: memory\n"); 50 - for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { 51 - res = bus->resource[index]; 50 + pci_bus_for_each_resource(bus, res, index) { 52 51 if (res && (res->flags & IORESOURCE_MEM) && 53 52 !(res->flags & IORESOURCE_PREFETCH)) { 54 53 out += sprintf(out, "start = %8.8llx, " ··· 57 58 } 58 59 } 59 60 out += sprintf(out, "Free resources: prefetchable memory\n"); 60 - for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { 61 - res = bus->resource[index]; 61 + pci_bus_for_each_resource(bus, res, index) { 62 62 if (res && (res->flags & IORESOURCE_MEM) && 63 63 (res->flags & IORESOURCE_PREFETCH)) { 64 64 out += sprintf(out, "start = %8.8llx, " ··· 67 69 } 68 70 } 69 71 out += sprintf(out, "Free resources: IO\n"); 70 - for (index = 0; index < PCI_BUS_NUM_RESOURCES; index++) { 71 - res = bus->resource[index]; 72 + pci_bus_for_each_resource(bus, res, index) { 72 73 if (res && (res->flags & IORESOURCE_IO)) { 73 74 out += sprintf(out, "start = %8.8llx, " 74 75 "length = %8.8llx\n",
-34
drivers/pci/legacy.c
··· 1 - #include <linux/init.h> 2 - #include <linux/pci.h> 3 - #include <linux/module.h> 4 - #include <linux/interrupt.h> 5 - #include "pci.h" 6 - 7 - /** 8 - * pci_find_device - begin or continue searching for a PCI device by vendor/device id 9 - * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids 10 - * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids 11 - * @from: Previous PCI device found in search, or %NULL for new search. 12 - * 13 - * Iterates through the list of known PCI devices. If a PCI device is found 14 - * with a matching @vendor and @device, a pointer to its device structure is 15 - * returned. Otherwise, %NULL is returned. 16 - * A new search is initiated by passing %NULL as the @from argument. 17 - * Otherwise if @from is not %NULL, searches continue from next device 18 - * on the global list. 19 - * 20 - * NOTE: Do not use this function any more; use pci_get_device() instead, as 21 - * the PCI device returned by this function can disappear at any moment in 22 - * time. 23 - */ 24 - struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, 25 - struct pci_dev *from) 26 - { 27 - struct pci_dev *pdev; 28 - 29 - pci_dev_get(from); 30 - pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); 31 - pci_dev_put(pdev); 32 - return pdev; 33 - } 34 - EXPORT_SYMBOL(pci_find_device);
+211
drivers/pci/pci-acpi.c
··· 16 16 #include <acpi/acpi_bus.h> 17 17 18 18 #include <linux/pci-acpi.h> 19 + #include <linux/pm_runtime.h> 19 20 #include "pci.h" 21 + 22 + static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); 23 + 24 + /** 25 + * pci_acpi_wake_bus - Wake-up notification handler for root buses. 26 + * @handle: ACPI handle of a device the notification is for. 27 + * @event: Type of the signaled event. 28 + * @context: PCI root bus to wake up devices on. 29 + */ 30 + static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) 31 + { 32 + struct pci_bus *pci_bus = context; 33 + 34 + if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) 35 + pci_pme_wakeup_bus(pci_bus); 36 + } 37 + 38 + /** 39 + * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. 40 + * @handle: ACPI handle of a device the notification is for. 41 + * @event: Type of the signaled event. 42 + * @context: PCI device object to wake up. 43 + */ 44 + static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) 45 + { 46 + struct pci_dev *pci_dev = context; 47 + 48 + if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { 49 + pci_check_pme_status(pci_dev); 50 + pm_runtime_resume(&pci_dev->dev); 51 + if (pci_dev->subordinate) 52 + pci_pme_wakeup_bus(pci_dev->subordinate); 53 + } 54 + } 55 + 56 + /** 57 + * add_pm_notifier - Register PM notifier for given ACPI device. 58 + * @dev: ACPI device to add the notifier for. 59 + * @context: PCI device or bus to check for PME status if an event is signaled. 60 + * 61 + * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of 62 + * PM wake-up events. For example, wake-up events may be generated for bridges 63 + * if one of the devices below the bridge is signaling PME, even if the bridge 64 + * itself doesn't have a wake-up GPE associated with it. 
65 + */ 66 + static acpi_status add_pm_notifier(struct acpi_device *dev, 67 + acpi_notify_handler handler, 68 + void *context) 69 + { 70 + acpi_status status = AE_ALREADY_EXISTS; 71 + 72 + mutex_lock(&pci_acpi_pm_notify_mtx); 73 + 74 + if (dev->wakeup.flags.notifier_present) 75 + goto out; 76 + 77 + status = acpi_install_notify_handler(dev->handle, 78 + ACPI_SYSTEM_NOTIFY, 79 + handler, context); 80 + if (ACPI_FAILURE(status)) 81 + goto out; 82 + 83 + dev->wakeup.flags.notifier_present = true; 84 + 85 + out: 86 + mutex_unlock(&pci_acpi_pm_notify_mtx); 87 + return status; 88 + } 89 + 90 + /** 91 + * remove_pm_notifier - Unregister PM notifier from given ACPI device. 92 + * @dev: ACPI device to remove the notifier from. 93 + */ 94 + static acpi_status remove_pm_notifier(struct acpi_device *dev, 95 + acpi_notify_handler handler) 96 + { 97 + acpi_status status = AE_BAD_PARAMETER; 98 + 99 + mutex_lock(&pci_acpi_pm_notify_mtx); 100 + 101 + if (!dev->wakeup.flags.notifier_present) 102 + goto out; 103 + 104 + status = acpi_remove_notify_handler(dev->handle, 105 + ACPI_SYSTEM_NOTIFY, 106 + handler); 107 + if (ACPI_FAILURE(status)) 108 + goto out; 109 + 110 + dev->wakeup.flags.notifier_present = false; 111 + 112 + out: 113 + mutex_unlock(&pci_acpi_pm_notify_mtx); 114 + return status; 115 + } 116 + 117 + /** 118 + * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. 119 + * @dev: ACPI device to add the notifier for. 120 + * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. 121 + */ 122 + acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, 123 + struct pci_bus *pci_bus) 124 + { 125 + return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); 126 + } 127 + 128 + /** 129 + * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. 130 + * @dev: ACPI device to remove the notifier from. 
131 + */ 132 + acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) 133 + { 134 + return remove_pm_notifier(dev, pci_acpi_wake_bus); 135 + } 136 + 137 + /** 138 + * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. 139 + * @dev: ACPI device to add the notifier for. 140 + * @pci_dev: PCI device to check for the PME status if an event is signaled. 141 + */ 142 + acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, 143 + struct pci_dev *pci_dev) 144 + { 145 + return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); 146 + } 147 + 148 + /** 149 + * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. 150 + * @dev: ACPI device to remove the notifier from. 151 + */ 152 + acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) 153 + { 154 + return remove_pm_notifier(dev, pci_acpi_wake_dev); 155 + } 20 156 21 157 /* 22 158 * _SxD returns the D-state with the highest power ··· 267 131 return 0; 268 132 } 269 133 134 + /** 135 + * acpi_dev_run_wake - Enable/disable wake-up for given device. 136 + * @phys_dev: Device to enable/disable the platform to wake-up the system for. 137 + * @enable: Whether enable or disable the wake-up functionality. 138 + * 139 + * Find the ACPI device object corresponding to @pci_dev and try to 140 + * enable/disable the GPE associated with it. 
141 + */ 142 + static int acpi_dev_run_wake(struct device *phys_dev, bool enable) 143 + { 144 + struct acpi_device *dev; 145 + acpi_handle handle; 146 + int error = -ENODEV; 147 + 148 + if (!device_run_wake(phys_dev)) 149 + return -EINVAL; 150 + 151 + handle = DEVICE_ACPI_HANDLE(phys_dev); 152 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { 153 + dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", 154 + __func__); 155 + return -ENODEV; 156 + } 157 + 158 + if (enable) { 159 + if (!dev->wakeup.run_wake_count++) { 160 + acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); 161 + acpi_enable_gpe(dev->wakeup.gpe_device, 162 + dev->wakeup.gpe_number, 163 + ACPI_GPE_TYPE_RUNTIME); 164 + } 165 + } else if (dev->wakeup.run_wake_count > 0) { 166 + if (!--dev->wakeup.run_wake_count) { 167 + acpi_disable_gpe(dev->wakeup.gpe_device, 168 + dev->wakeup.gpe_number, 169 + ACPI_GPE_TYPE_RUNTIME); 170 + acpi_disable_wakeup_device_power(dev); 171 + } 172 + } else { 173 + error = -EALREADY; 174 + } 175 + 176 + return error; 177 + } 178 + 179 + static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) 180 + { 181 + while (bus->parent) { 182 + struct pci_dev *bridge = bus->self; 183 + 184 + if (bridge->pme_interrupt) 185 + return; 186 + if (!acpi_dev_run_wake(&bridge->dev, enable)) 187 + return; 188 + bus = bus->parent; 189 + } 190 + 191 + /* We have reached the root bus. 
*/ 192 + if (bus->bridge) 193 + acpi_dev_run_wake(bus->bridge, enable); 194 + } 195 + 196 + static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) 197 + { 198 + if (dev->pme_interrupt) 199 + return 0; 200 + 201 + if (!acpi_dev_run_wake(&dev->dev, enable)) 202 + return 0; 203 + 204 + acpi_pci_propagate_run_wake(dev->bus, enable); 205 + return 0; 206 + } 207 + 270 208 static struct pci_platform_pm_ops acpi_pci_platform_pm = { 271 209 .is_manageable = acpi_pci_power_manageable, 272 210 .set_state = acpi_pci_set_power_state, 273 211 .choose_state = acpi_pci_choose_state, 274 212 .can_wakeup = acpi_pci_can_wakeup, 275 213 .sleep_wake = acpi_pci_sleep_wake, 214 + .run_wake = acpi_pci_run_wake, 276 215 }; 277 216 278 217 /* ACPI bus type */
+133 -27
drivers/pci/pci-driver.c
··· 17 17 #include <linux/slab.h> 18 18 #include <linux/sched.h> 19 19 #include <linux/cpu.h> 20 + #include <linux/pm_runtime.h> 20 21 #include "pci.h" 21 22 22 23 struct pci_dynid { ··· 405 404 pci_msix_shutdown(pci_dev); 406 405 } 407 406 407 + #ifdef CONFIG_PM_OPS 408 + 409 + /* Auxiliary functions used for system resume and run-time resume. */ 410 + 411 + /** 412 + * pci_restore_standard_config - restore standard config registers of PCI device 413 + * @pci_dev: PCI device to handle 414 + */ 415 + static int pci_restore_standard_config(struct pci_dev *pci_dev) 416 + { 417 + pci_update_current_state(pci_dev, PCI_UNKNOWN); 418 + 419 + if (pci_dev->current_state != PCI_D0) { 420 + int error = pci_set_power_state(pci_dev, PCI_D0); 421 + if (error) 422 + return error; 423 + } 424 + 425 + return pci_restore_state(pci_dev); 426 + } 427 + 428 + static void pci_pm_default_resume_early(struct pci_dev *pci_dev) 429 + { 430 + pci_restore_standard_config(pci_dev); 431 + pci_fixup_device(pci_fixup_resume_early, pci_dev); 432 + } 433 + 434 + #endif 435 + 408 436 #ifdef CONFIG_PM_SLEEP 409 437 410 438 /* ··· 550 520 551 521 /* Auxiliary functions used by the new power management framework */ 552 522 553 - /** 554 - * pci_restore_standard_config - restore standard config registers of PCI device 555 - * @pci_dev: PCI device to handle 556 - */ 557 - static int pci_restore_standard_config(struct pci_dev *pci_dev) 558 - { 559 - pci_update_current_state(pci_dev, PCI_UNKNOWN); 560 - 561 - if (pci_dev->current_state != PCI_D0) { 562 - int error = pci_set_power_state(pci_dev, PCI_D0); 563 - if (error) 564 - return error; 565 - } 566 - 567 - return pci_restore_state(pci_dev); 568 - } 569 - 570 - static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 571 - { 572 - pci_restore_standard_config(pci_dev); 573 - pci_fixup_device(pci_fixup_resume_early, pci_dev); 574 - } 575 - 576 523 static void pci_pm_default_resume(struct pci_dev *pci_dev) 577 524 { 578 525 
pci_fixup_device(pci_fixup_resume, pci_dev); ··· 588 581 struct device_driver *drv = dev->driver; 589 582 int error = 0; 590 583 584 + /* 585 + * PCI devices suspended at run time need to be resumed at this 586 + * point, because in general it is necessary to reconfigure them for 587 + * system suspend. Namely, if the device is supposed to wake up the 588 + * system from the sleep state, we may need to reconfigure it for this 589 + * purpose. In turn, if the device is not supposed to wake up the 590 + * system from the sleep state, we'll have to prevent it from signaling 591 + * wake-up. 592 + */ 593 + pm_runtime_resume(dev); 594 + 591 595 if (drv && drv->pm && drv->pm->prepare) 592 596 error = drv->pm->prepare(dev); 593 597 ··· 612 594 if (drv && drv->pm && drv->pm->complete) 613 595 drv->pm->complete(dev); 614 596 } 597 + 598 + #else /* !CONFIG_PM_SLEEP */ 599 + 600 + #define pci_pm_prepare NULL 601 + #define pci_pm_complete NULL 602 + 603 + #endif /* !CONFIG_PM_SLEEP */ 615 604 616 605 #ifdef CONFIG_SUSPEND 617 606 ··· 706 681 struct device_driver *drv = dev->driver; 707 682 int error = 0; 708 683 709 - pci_pm_default_resume_noirq(pci_dev); 684 + pci_pm_default_resume_early(pci_dev); 710 685 711 686 if (pci_has_legacy_pm_support(pci_dev)) 712 687 return pci_legacy_resume_early(dev); ··· 904 879 struct device_driver *drv = dev->driver; 905 880 int error = 0; 906 881 907 - pci_pm_default_resume_noirq(pci_dev); 882 + pci_pm_default_resume_early(pci_dev); 908 883 909 884 if (pci_has_legacy_pm_support(pci_dev)) 910 885 return pci_legacy_resume_early(dev); ··· 956 931 957 932 #endif /* !CONFIG_HIBERNATION */ 958 933 934 + #ifdef CONFIG_PM_RUNTIME 935 + 936 + static int pci_pm_runtime_suspend(struct device *dev) 937 + { 938 + struct pci_dev *pci_dev = to_pci_dev(dev); 939 + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; 940 + pci_power_t prev = pci_dev->current_state; 941 + int error; 942 + 943 + if (!pm || !pm->runtime_suspend) 944 + return -ENOSYS; 945 + 946 + error = pm->runtime_suspend(dev); 947 + suspend_report_result(pm->runtime_suspend, error); 948 + if (error) 949 + return error; 950 + 951 + pci_fixup_device(pci_fixup_suspend, pci_dev); 952 + 953 + if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 954 + && pci_dev->current_state != PCI_UNKNOWN) { 955 + WARN_ONCE(pci_dev->current_state != prev, 956 + "PCI PM: State of device not saved by %pF\n", 957 + pm->runtime_suspend); 958 + return 0; 959 + } 960 + 961 + if (!pci_dev->state_saved) 962 + pci_save_state(pci_dev); 963 + 964 + pci_finish_runtime_suspend(pci_dev); 965 + 966 + return 0; 967 + } 968 + 969 + static int pci_pm_runtime_resume(struct device *dev) 970 + { 971 + struct pci_dev *pci_dev = to_pci_dev(dev); 972 + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 973 + 974 + if (!pm || !pm->runtime_resume) 975 + return -ENOSYS; 976 + 977 + pci_pm_default_resume_early(pci_dev); 978 + __pci_enable_wake(pci_dev, PCI_D0, true, false); 979 + pci_fixup_device(pci_fixup_resume, pci_dev); 980 + 981 + return pm->runtime_resume(dev); 982 + } 983 + 984 + static int pci_pm_runtime_idle(struct device *dev) 985 + { 986 + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; 987 + 988 + if (!pm) 989 + return -ENOSYS; 990 + 991 + if (pm->runtime_idle) { 992 + int ret = pm->runtime_idle(dev); 993 + if (ret) 994 + return ret; 995 + } 996 + 997 + pm_runtime_suspend(dev); 998 + 999 + return 0; 1000 + } 1001 + 1002 + #else /* !CONFIG_PM_RUNTIME */ 1003 + 1004 + #define pci_pm_runtime_suspend NULL 1005 + #define pci_pm_runtime_resume NULL 1006 + #define pci_pm_runtime_idle NULL 1007 + 1008 + #endif /* !CONFIG_PM_RUNTIME */ 1009 + 1010 + #ifdef CONFIG_PM_OPS 1011 + 959 1012 const struct dev_pm_ops pci_dev_pm_ops = { 960 1013 .prepare = pci_pm_prepare, 961 1014 .complete = pci_pm_complete, ··· 1049 946 .thaw_noirq = pci_pm_thaw_noirq, 1050 947 .poweroff_noirq = pci_pm_poweroff_noirq, 1051 948 .restore_noirq = pci_pm_restore_noirq, 949 + .runtime_suspend = pci_pm_runtime_suspend, 950 + .runtime_resume = pci_pm_runtime_resume, 951 + .runtime_idle = pci_pm_runtime_idle, 1052 952 }; 1053 953 1054 954 #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) 1055 955 1056 - #else /* !CONFIG_PM_SLEEP */ 956 + #else /* !CONFIG_PM_OPS */ 1057 957 1058 958 #define PCI_PM_OPS_PTR NULL 1059 959 1060 - #endif /* !CONFIG_PM_SLEEP */ 960 + #endif /* !CONFIG_PM_OPS */ 1061 961 1062 962 /** 1063 963 * __pci_register_driver - register a new pci driver
+147 -10
drivers/pci/pci.c
··· 19 19 #include <linux/pci-aspm.h> 20 20 #include <linux/pm_wakeup.h> 21 21 #include <linux/interrupt.h> 22 - #include <asm/dma.h> /* isa_dma_bridge_buggy */ 23 22 #include <linux/device.h> 23 + #include <linux/pm_runtime.h> 24 24 #include <asm/setup.h> 25 25 #include "pci.h" 26 26 ··· 28 28 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", 29 29 }; 30 30 EXPORT_SYMBOL_GPL(pci_power_names); 31 + 32 + int isa_dma_bridge_buggy; 33 + EXPORT_SYMBOL(isa_dma_bridge_buggy); 34 + 35 + int pci_pci_problems; 36 + EXPORT_SYMBOL(pci_pci_problems); 31 37 32 38 unsigned int pci_pm_d3_delay; 33 39 ··· 386 380 { 387 381 const struct pci_bus *bus = dev->bus; 388 382 int i; 389 - struct resource *best = NULL; 383 + struct resource *best = NULL, *r; 390 384 391 - for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 392 - struct resource *r = bus->resource[i]; 385 + pci_bus_for_each_resource(bus, r, i) { 393 386 if (!r) 394 387 continue; 395 388 if (res->start && !(res->start >= r->start && res->end <= r->end)) ··· 460 455 { 461 456 return pci_platform_pm ? 462 457 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; 458 + } 459 + 460 + static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) 461 + { 462 + return pci_platform_pm ? 463 + pci_platform_pm->run_wake(dev, enable) : -ENODEV; 463 464 } 464 465 465 466 /** ··· 1201 1190 } 1202 1191 1203 1192 /** 1193 + * pci_check_pme_status - Check if given device has generated PME. 1194 + * @dev: Device to check. 1195 + * 1196 + * Check the PME status of the device and if set, clear it and clear PME enable 1197 + * (if set). Return 'true' if PME status and PME enable were both set or 1198 + * 'false' otherwise. 
1199 + */ 1200 + bool pci_check_pme_status(struct pci_dev *dev) 1201 + { 1202 + int pmcsr_pos; 1203 + u16 pmcsr; 1204 + bool ret = false; 1205 + 1206 + if (!dev->pm_cap) 1207 + return false; 1208 + 1209 + pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; 1210 + pci_read_config_word(dev, pmcsr_pos, &pmcsr); 1211 + if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) 1212 + return false; 1213 + 1214 + /* Clear PME status. */ 1215 + pmcsr |= PCI_PM_CTRL_PME_STATUS; 1216 + if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { 1217 + /* Disable PME to avoid interrupt flood. */ 1218 + pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 1219 + ret = true; 1220 + } 1221 + 1222 + pci_write_config_word(dev, pmcsr_pos, pmcsr); 1223 + 1224 + return ret; 1225 + } 1226 + 1227 + /** 1228 + * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. 1229 + * @dev: Device to handle. 1230 + * @ign: Ignored. 1231 + * 1232 + * Check if @dev has generated PME and queue a resume request for it in that 1233 + * case. 1234 + */ 1235 + static int pci_pme_wakeup(struct pci_dev *dev, void *ign) 1236 + { 1237 + if (pci_check_pme_status(dev)) 1238 + pm_request_resume(&dev->dev); 1239 + return 0; 1240 + } 1241 + 1242 + /** 1243 + * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. 1244 + * @bus: Top bus of the subtree to walk. 1245 + */ 1246 + void pci_pme_wakeup_bus(struct pci_bus *bus) 1247 + { 1248 + if (bus) 1249 + pci_walk_bus(bus, pci_pme_wakeup, NULL); 1250 + } 1251 + 1252 + /** 1204 1253 * pci_pme_capable - check the capability of PCI device to generate PME# 1205 1254 * @dev: PCI device to handle. 1206 1255 * @state: PCI state from which device will issue PME#. 
··· 1301 1230 } 1302 1231 1303 1232 /** 1304 - * pci_enable_wake - enable PCI device as wakeup event source 1233 + * __pci_enable_wake - enable PCI device as wakeup event source 1305 1234 * @dev: PCI device affected 1306 1235 * @state: PCI state from which device will issue wakeup events 1236 + * @runtime: True if the events are to be generated at run time 1307 1237 * @enable: True to enable event generation; false to disable 1308 1238 * 1309 1239 * This enables the device as a wakeup event source, or disables it. ··· 1320 1248 * Error code depending on the platform is returned if both the platform and 1321 1249 * the native mechanism fail to enable the generation of wake-up events 1322 1250 */ 1323 - int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) 1251 + int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, 1252 + bool runtime, bool enable) 1324 1253 { 1325 1254 int ret = 0; 1326 1255 1327 - if (enable && !device_may_wakeup(&dev->dev)) 1256 + if (enable && !runtime && !device_may_wakeup(&dev->dev)) 1328 1257 return -EINVAL; 1329 1258 1330 1259 /* Don't do the same thing twice in a row for one device. */ ··· 1345 1272 pci_pme_active(dev, true); 1346 1273 else 1347 1274 ret = 1; 1348 - error = platform_pci_sleep_wake(dev, true); 1275 + error = runtime ? 
platform_pci_run_wake(dev, true) : 1276 + platform_pci_sleep_wake(dev, true); 1349 1277 if (ret) 1350 1278 ret = error; 1351 1279 if (!ret) 1352 1280 dev->wakeup_prepared = true; 1353 1281 } else { 1354 - platform_pci_sleep_wake(dev, false); 1282 + if (runtime) 1283 + platform_pci_run_wake(dev, false); 1284 + else 1285 + platform_pci_sleep_wake(dev, false); 1355 1286 pci_pme_active(dev, false); 1356 1287 dev->wakeup_prepared = false; 1357 1288 } 1358 1289 1359 1290 return ret; 1360 1291 } 1292 + EXPORT_SYMBOL(__pci_enable_wake); 1361 1293 1362 1294 /** 1363 1295 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold ··· 1470 1392 pci_enable_wake(dev, PCI_D0, false); 1471 1393 return pci_set_power_state(dev, PCI_D0); 1472 1394 } 1395 + 1396 + /** 1397 + * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. 1398 + * @dev: PCI device being suspended. 1399 + * 1400 + * Prepare @dev to generate wake-up events at run time and put it into a low 1401 + * power state. 1402 + */ 1403 + int pci_finish_runtime_suspend(struct pci_dev *dev) 1404 + { 1405 + pci_power_t target_state = pci_target_state(dev); 1406 + int error; 1407 + 1408 + if (target_state == PCI_POWER_ERROR) 1409 + return -EIO; 1410 + 1411 + __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev)); 1412 + 1413 + error = pci_set_power_state(dev, target_state); 1414 + 1415 + if (error) 1416 + __pci_enable_wake(dev, target_state, true, false); 1417 + 1418 + return error; 1419 + } 1420 + 1421 + /** 1422 + * pci_dev_run_wake - Check if device can generate run-time wake-up events. 1423 + * @dev: Device to check. 1424 + * 1425 + * Return true if the device itself is capable of generating wake-up events 1426 + * (through the platform or using the native PCIe PME) or if the device supports 1427 + * PME and one of its upstream bridges can generate wake-up events. 
1428 + */ 1429 + bool pci_dev_run_wake(struct pci_dev *dev) 1430 + { 1431 + struct pci_bus *bus = dev->bus; 1432 + 1433 + if (device_run_wake(&dev->dev)) 1434 + return true; 1435 + 1436 + if (!dev->pme_support) 1437 + return false; 1438 + 1439 + while (bus->parent) { 1440 + struct pci_dev *bridge = bus->self; 1441 + 1442 + if (device_run_wake(&bridge->dev)) 1443 + return true; 1444 + 1445 + bus = bus->parent; 1446 + } 1447 + 1448 + /* We have reached the root bus. */ 1449 + if (bus->bridge) 1450 + return device_run_wake(bus->bridge); 1451 + 1452 + return false; 1453 + } 1454 + EXPORT_SYMBOL_GPL(pci_dev_run_wake); 1473 1455 1474 1456 /** 1475 1457 * pci_pm_init - Initialize PM functions of given PCI device ··· 3009 2871 EXPORT_SYMBOL(pci_restore_state); 3010 2872 EXPORT_SYMBOL(pci_pme_capable); 3011 2873 EXPORT_SYMBOL(pci_pme_active); 3012 - EXPORT_SYMBOL(pci_enable_wake); 3013 2874 EXPORT_SYMBOL(pci_wake_from_d3); 3014 2875 EXPORT_SYMBOL(pci_target_state); 3015 2876 EXPORT_SYMBOL(pci_prepare_to_sleep);
+16
drivers/pci/pci.h
··· 35 35 * 36 36 * @sleep_wake: enables/disables the system wake up capability of given device 37 37 * 38 + * @run_wake: enables/disables the platform to generate run-time wake-up events 39 + * for given device (the device's wake-up capability has to be 40 + * enabled by @sleep_wake for this feature to work) 41 + * 38 42 * If given platform is generally capable of power managing PCI devices, all of 39 43 * these callbacks are mandatory. 40 44 */ ··· 48 44 pci_power_t (*choose_state)(struct pci_dev *dev); 49 45 bool (*can_wakeup)(struct pci_dev *dev); 50 46 int (*sleep_wake)(struct pci_dev *dev, bool enable); 47 + int (*run_wake)(struct pci_dev *dev, bool enable); 51 48 }; 52 49 53 50 extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops); 54 51 extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state); 55 52 extern void pci_disable_enabled_device(struct pci_dev *dev); 53 + extern bool pci_check_pme_status(struct pci_dev *dev); 54 + extern int pci_finish_runtime_suspend(struct pci_dev *dev); 55 + extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 56 + extern void pci_pme_wakeup_bus(struct pci_bus *bus); 56 57 extern void pci_pm_init(struct pci_dev *dev); 57 58 extern void platform_pci_wakeup_init(struct pci_dev *dev); 58 59 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); ··· 328 319 int (*reset)(struct pci_dev *dev, int probe); 329 320 }; 330 321 322 + #ifdef CONFIG_PCI_QUIRKS 331 323 extern int pci_dev_specific_reset(struct pci_dev *dev, int probe); 324 + #else 325 + static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) 326 + { 327 + return -ENOTTY; 328 + } 329 + #endif 332 330 333 331 #endif /* DRIVERS_PCI_H */
+4
drivers/pci/pcie/Kconfig
··· 46 46 help 47 47 This enables PCI Express ASPM debug support. It will add per-device 48 48 interface to control ASPM. 49 + 50 + config PCIE_PME 51 + def_bool y 52 + depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI
+2
drivers/pci/pcie/Makefile
··· 11 11 12 12 # Build PCI Express AER if needed 13 13 obj-$(CONFIG_PCIEAER) += aer/ 14 + 15 + obj-$(CONFIG_PCIE_PME) += pme/
+8
drivers/pci/pcie/pme/Makefile
··· 1 + # 2 + # Makefile for PCI-Express Root Port PME signaling driver 3 + # 4 + 5 + obj-$(CONFIG_PCIE_PME) += pmedriver.o 6 + 7 + pmedriver-objs := pcie_pme.o 8 + pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
+505
drivers/pci/pcie/pme/pcie_pme.c
··· 1 + /* 2 + * PCIe Native PME support 3 + * 4 + * Copyright (C) 2007 - 2009 Intel Corp 5 + * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com> 6 + * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 7 + * 8 + * This file is subject to the terms and conditions of the GNU General Public 9 + * License V2. See the file "COPYING" in the main directory of this archive 10 + * for more details. 11 + */ 12 + 13 + #include <linux/module.h> 14 + #include <linux/pci.h> 15 + #include <linux/kernel.h> 16 + #include <linux/errno.h> 17 + #include <linux/init.h> 18 + #include <linux/interrupt.h> 19 + #include <linux/device.h> 20 + #include <linux/pcieport_if.h> 21 + #include <linux/acpi.h> 22 + #include <linux/pci-acpi.h> 23 + #include <linux/pm_runtime.h> 24 + 25 + #include "../../pci.h" 26 + #include "pcie_pme.h" 27 + 28 + #define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ 29 + #define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ 30 + 31 + /* 32 + * If set, this switch will prevent the PCIe root port PME service driver from 33 + * being registered. Consequently, the interrupt-based PCIe PME signaling will 34 + * not be used by any PCIe root ports in that case. 35 + */ 36 + static bool pcie_pme_disabled; 37 + 38 + /* 39 + * The PCI Express Base Specification 2.0, Section 6.1.8, states the following: 40 + * "In order to maintain compatibility with non-PCI Express-aware system 41 + * software, system power management logic must be configured by firmware to use 42 + * the legacy mechanism of signaling PME by default. PCI Express-aware system 43 + * software must notify the firmware prior to enabling native, interrupt-based 44 + * PME signaling." However, if the platform doesn't provide us with a suitable 45 + * notification mechanism or the notification fails, it is not clear whether or 46 + * not we are supposed to use the interrupt-based PCIe PME signaling. The 47 + * switch below can be used to indicate the desired behaviour. 
When set, it 48 + * will make the kernel use the interrupt-based PCIe PME signaling regardless of 49 + * the platform notification status, although the kernel will attempt to notify 50 + * the platform anyway. When unset, it will prevent the kernel from using 51 + * the interrupt-based PCIe PME signaling if the platform notification fails, 52 + * which is the default. 53 + */ 54 + static bool pcie_pme_force_enable; 55 + 56 + /* 57 + * If this switch is set, MSI will not be used for PCIe PME signaling. This 58 + * causes the PCIe port driver to use INTx interrupts only, but it turns out 59 + * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based 60 + * wake-up from system sleep states. 61 + */ 62 + bool pcie_pme_msi_disabled; 63 + 64 + static int __init pcie_pme_setup(char *str) 65 + { 66 + if (!strcmp(str, "off")) 67 + pcie_pme_disabled = true; 68 + else if (!strcmp(str, "force")) 69 + pcie_pme_force_enable = true; 70 + else if (!strcmp(str, "nomsi")) 71 + pcie_pme_msi_disabled = true; 72 + return 1; 73 + } 74 + __setup("pcie_pme=", pcie_pme_setup); 75 + 76 + /** 77 + * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME. 78 + * @srv: PCIe PME root port service to use for carrying out the check. 79 + * 80 + * Notify the platform that the native PCIe PME is going to be used and return 81 + * 'true' if the control of the PCIe PME registers has been acquired from the 82 + * platform. 83 + */ 84 + static bool pcie_pme_platform_setup(struct pcie_device *srv) 85 + { 86 + if (!pcie_pme_platform_notify(srv)) 87 + return true; 88 + return pcie_pme_force_enable; 89 + } 90 + 91 + struct pcie_pme_service_data { 92 + spinlock_t lock; 93 + struct pcie_device *srv; 94 + struct work_struct work; 95 + bool noirq; /* Don't enable the PME interrupt used by this service. */ 96 + }; 97 + 98 + /** 99 + * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation. 100 + * @dev: PCIe root port or event collector. 
101 + * @enable: Enable or disable the interrupt. 102 + */ 103 + static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) 104 + { 105 + int rtctl_pos; 106 + u16 rtctl; 107 + 108 + rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL; 109 + 110 + pci_read_config_word(dev, rtctl_pos, &rtctl); 111 + if (enable) 112 + rtctl |= PCI_EXP_RTCTL_PMEIE; 113 + else 114 + rtctl &= ~PCI_EXP_RTCTL_PMEIE; 115 + pci_write_config_word(dev, rtctl_pos, rtctl); 116 + } 117 + 118 + /** 119 + * pcie_pme_clear_status - Clear root port PME interrupt status. 120 + * @dev: PCIe root port or event collector. 121 + */ 122 + static void pcie_pme_clear_status(struct pci_dev *dev) 123 + { 124 + int rtsta_pos; 125 + u32 rtsta; 126 + 127 + rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; 128 + 129 + pci_read_config_dword(dev, rtsta_pos, &rtsta); 130 + rtsta |= PCI_EXP_RTSTA_PME; 131 + pci_write_config_dword(dev, rtsta_pos, rtsta); 132 + } 133 + 134 + /** 135 + * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#. 136 + * @bus: PCI bus to scan. 137 + * 138 + * Scan given PCI bus and all buses under it for devices asserting PME#. 139 + */ 140 + static bool pcie_pme_walk_bus(struct pci_bus *bus) 141 + { 142 + struct pci_dev *dev; 143 + bool ret = false; 144 + 145 + list_for_each_entry(dev, &bus->devices, bus_list) { 146 + /* Skip PCIe devices in case we started from a root port. */ 147 + if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { 148 + pm_request_resume(&dev->dev); 149 + ret = true; 150 + } 151 + 152 + if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate)) 153 + ret = true; 154 + } 155 + 156 + return ret; 157 + } 158 + 159 + /** 160 + * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME. 161 + * @bus: Secondary bus of the bridge. 162 + * @devfn: Device/function number to check. 163 + * 164 + * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band 165 + * PCIe PME message. 
In such a case the bridge should use the Requester ID 166 + * of device/function number 0 on its secondary bus. 167 + */ 168 + static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn) 169 + { 170 + struct pci_dev *dev; 171 + bool found = false; 172 + 173 + if (devfn) 174 + return false; 175 + 176 + dev = pci_dev_get(bus->self); 177 + if (!dev) 178 + return false; 179 + 180 + if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 181 + down_read(&pci_bus_sem); 182 + if (pcie_pme_walk_bus(bus)) 183 + found = true; 184 + up_read(&pci_bus_sem); 185 + } 186 + 187 + pci_dev_put(dev); 188 + return found; 189 + } 190 + 191 + /** 192 + * pcie_pme_handle_request - Find device that generated PME and handle it. 193 + * @port: Root port or event collector that generated the PME interrupt. 194 + * @req_id: PCIe Requester ID of the device that generated the PME. 195 + */ 196 + static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) 197 + { 198 + u8 busnr = req_id >> 8, devfn = req_id & 0xff; 199 + struct pci_bus *bus; 200 + struct pci_dev *dev; 201 + bool found = false; 202 + 203 + /* First, check if the PME is from the root port itself. */ 204 + if (port->devfn == devfn && port->bus->number == busnr) { 205 + if (pci_check_pme_status(port)) { 206 + pm_request_resume(&port->dev); 207 + found = true; 208 + } else { 209 + /* 210 + * Apparently, the root port generated the PME on behalf 211 + * of a non-PCIe device downstream. If this is done by 212 + * a root port, the Requester ID field in its status 213 + * register may contain either the root port's, or the 214 + * source device's information (PCI Express Base 215 + * Specification, Rev. 2.0, Section 6.1.9). 216 + */ 217 + down_read(&pci_bus_sem); 218 + found = pcie_pme_walk_bus(port->subordinate); 219 + up_read(&pci_bus_sem); 220 + } 221 + goto out; 222 + } 223 + 224 + /* Second, find the bus the source device is on. 
*/ 225 + bus = pci_find_bus(pci_domain_nr(port->bus), busnr); 226 + if (!bus) 227 + goto out; 228 + 229 + /* Next, check if the PME is from a PCIe-PCI bridge. */ 230 + found = pcie_pme_from_pci_bridge(bus, devfn); 231 + if (found) 232 + goto out; 233 + 234 + /* Finally, try to find the PME source on the bus. */ 235 + down_read(&pci_bus_sem); 236 + list_for_each_entry(dev, &bus->devices, bus_list) { 237 + pci_dev_get(dev); 238 + if (dev->devfn == devfn) { 239 + found = true; 240 + break; 241 + } 242 + pci_dev_put(dev); 243 + } 244 + up_read(&pci_bus_sem); 245 + 246 + if (found) { 247 + /* The device is there, but we have to check its PME status. */ 248 + found = pci_check_pme_status(dev); 249 + if (found) 250 + pm_request_resume(&dev->dev); 251 + pci_dev_put(dev); 252 + } else if (devfn) { 253 + /* 254 + * The device is not there, but we can still try to recover by 255 + * assuming that the PME was reported by a PCIe-PCI bridge that 256 + * used devfn different from zero. 257 + */ 258 + dev_dbg(&port->dev, "PME interrupt generated for " 259 + "non-existent device %02x:%02x.%d\n", 260 + busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); 261 + found = pcie_pme_from_pci_bridge(bus, 0); 262 + } 263 + 264 + out: 265 + if (!found) 266 + dev_dbg(&port->dev, "Spurious native PME interrupt!\n"); 267 + } 268 + 269 + /** 270 + * pcie_pme_work_fn - Work handler for PCIe PME interrupt. 271 + * @work: Work structure giving access to service data. 
272 + */ 273 + static void pcie_pme_work_fn(struct work_struct *work) 274 + { 275 + struct pcie_pme_service_data *data = 276 + container_of(work, struct pcie_pme_service_data, work); 277 + struct pci_dev *port = data->srv->port; 278 + int rtsta_pos; 279 + u32 rtsta; 280 + 281 + rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; 282 + 283 + spin_lock_irq(&data->lock); 284 + 285 + for (;;) { 286 + if (data->noirq) 287 + break; 288 + 289 + pci_read_config_dword(port, rtsta_pos, &rtsta); 290 + if (rtsta & PCI_EXP_RTSTA_PME) { 291 + /* 292 + * Clear PME status of the port. If there are other 293 + * pending PMEs, the status will be set again. 294 + */ 295 + pcie_pme_clear_status(port); 296 + 297 + spin_unlock_irq(&data->lock); 298 + pcie_pme_handle_request(port, rtsta & 0xffff); 299 + spin_lock_irq(&data->lock); 300 + 301 + continue; 302 + } 303 + 304 + /* No need to loop if there are no more PMEs pending. */ 305 + if (!(rtsta & PCI_EXP_RTSTA_PENDING)) 306 + break; 307 + 308 + spin_unlock_irq(&data->lock); 309 + cpu_relax(); 310 + spin_lock_irq(&data->lock); 311 + } 312 + 313 + if (!data->noirq) 314 + pcie_pme_interrupt_enable(port, true); 315 + 316 + spin_unlock_irq(&data->lock); 317 + } 318 + 319 + /** 320 + * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt. 321 + * @irq: Interrupt vector. 322 + * @context: Interrupt context pointer. 
323 + */ 324 + static irqreturn_t pcie_pme_irq(int irq, void *context) 325 + { 326 + struct pci_dev *port; 327 + struct pcie_pme_service_data *data; 328 + int rtsta_pos; 329 + u32 rtsta; 330 + unsigned long flags; 331 + 332 + port = ((struct pcie_device *)context)->port; 333 + data = get_service_data((struct pcie_device *)context); 334 + 335 + rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; 336 + 337 + spin_lock_irqsave(&data->lock, flags); 338 + pci_read_config_dword(port, rtsta_pos, &rtsta); 339 + 340 + if (!(rtsta & PCI_EXP_RTSTA_PME)) { 341 + spin_unlock_irqrestore(&data->lock, flags); 342 + return IRQ_NONE; 343 + } 344 + 345 + pcie_pme_interrupt_enable(port, false); 346 + spin_unlock_irqrestore(&data->lock, flags); 347 + 348 + /* We don't use pm_wq, because it's freezable. */ 349 + schedule_work(&data->work); 350 + 351 + return IRQ_HANDLED; 352 + } 353 + 354 + /** 355 + * pcie_pme_set_native - Set the PME interrupt flag for given device. 356 + * @dev: PCI device to handle. 357 + * @ign: Ignored. 358 + */ 359 + static int pcie_pme_set_native(struct pci_dev *dev, void *ign) 360 + { 361 + dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n"); 362 + 363 + device_set_run_wake(&dev->dev, true); 364 + dev->pme_interrupt = true; 365 + return 0; 366 + } 367 + 368 + /** 369 + * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port. 370 + * @port: PCIe root port or event collector to handle. 371 + * 372 + * For each device below given root port, including the port itself (or for each 373 + * root complex integrated endpoint if @port is a root complex event collector) 374 + * set the flag indicating that it can signal run-time wake-up events via PCIe 375 + * PME interrupts. 
376 + */ 377 + static void pcie_pme_mark_devices(struct pci_dev *port) 378 + { 379 + pcie_pme_set_native(port, NULL); 380 + if (port->subordinate) { 381 + pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL); 382 + } else { 383 + struct pci_bus *bus = port->bus; 384 + struct pci_dev *dev; 385 + 386 + /* Check if this is a root port event collector. */ 387 + if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) 388 + return; 389 + 390 + down_read(&pci_bus_sem); 391 + list_for_each_entry(dev, &bus->devices, bus_list) 392 + if (pci_is_pcie(dev) 393 + && dev->pcie_type == PCI_EXP_TYPE_RC_END) 394 + pcie_pme_set_native(dev, NULL); 395 + up_read(&pci_bus_sem); 396 + } 397 + } 398 + 399 + /** 400 + * pcie_pme_probe - Initialize PCIe PME service for given root port. 401 + * @srv: PCIe service to initialize. 402 + */ 403 + static int pcie_pme_probe(struct pcie_device *srv) 404 + { 405 + struct pci_dev *port; 406 + struct pcie_pme_service_data *data; 407 + int ret; 408 + 409 + if (!pcie_pme_platform_setup(srv)) 410 + return -EACCES; 411 + 412 + data = kzalloc(sizeof(*data), GFP_KERNEL); 413 + if (!data) 414 + return -ENOMEM; 415 + 416 + spin_lock_init(&data->lock); 417 + INIT_WORK(&data->work, pcie_pme_work_fn); 418 + data->srv = srv; 419 + set_service_data(srv, data); 420 + 421 + port = srv->port; 422 + pcie_pme_interrupt_enable(port, false); 423 + pcie_pme_clear_status(port); 424 + 425 + ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv); 426 + if (ret) { 427 + kfree(data); 428 + } else { 429 + pcie_pme_mark_devices(port); 430 + pcie_pme_interrupt_enable(port, true); 431 + } 432 + 433 + return ret; 434 + } 435 + 436 + /** 437 + * pcie_pme_suspend - Suspend PCIe PME service device. 438 + * @srv: PCIe service device to suspend. 
439 + */ 440 + static int pcie_pme_suspend(struct pcie_device *srv) 441 + { 442 + struct pcie_pme_service_data *data = get_service_data(srv); 443 + struct pci_dev *port = srv->port; 444 + 445 + spin_lock_irq(&data->lock); 446 + pcie_pme_interrupt_enable(port, false); 447 + pcie_pme_clear_status(port); 448 + data->noirq = true; 449 + spin_unlock_irq(&data->lock); 450 + 451 + synchronize_irq(srv->irq); 452 + 453 + return 0; 454 + } 455 + 456 + /** 457 + * pcie_pme_resume - Resume PCIe PME service device. 458 + * @srv - PCIe service device to resume. 459 + */ 460 + static int pcie_pme_resume(struct pcie_device *srv) 461 + { 462 + struct pcie_pme_service_data *data = get_service_data(srv); 463 + struct pci_dev *port = srv->port; 464 + 465 + spin_lock_irq(&data->lock); 466 + data->noirq = false; 467 + pcie_pme_clear_status(port); 468 + pcie_pme_interrupt_enable(port, true); 469 + spin_unlock_irq(&data->lock); 470 + 471 + return 0; 472 + } 473 + 474 + /** 475 + * pcie_pme_remove - Prepare PCIe PME service device for removal. 476 + * @srv - PCIe service device to resume. 477 + */ 478 + static void pcie_pme_remove(struct pcie_device *srv) 479 + { 480 + pcie_pme_suspend(srv); 481 + free_irq(srv->irq, srv); 482 + kfree(get_service_data(srv)); 483 + } 484 + 485 + static struct pcie_port_service_driver pcie_pme_driver = { 486 + .name = "pcie_pme", 487 + .port_type = PCI_EXP_TYPE_ROOT_PORT, 488 + .service = PCIE_PORT_SERVICE_PME, 489 + 490 + .probe = pcie_pme_probe, 491 + .suspend = pcie_pme_suspend, 492 + .resume = pcie_pme_resume, 493 + .remove = pcie_pme_remove, 494 + }; 495 + 496 + /** 497 + * pcie_pme_service_init - Register the PCIe PME service driver. 498 + */ 499 + static int __init pcie_pme_service_init(void) 500 + { 501 + return pcie_pme_disabled ? 502 + -ENODEV : pcie_port_service_register(&pcie_pme_driver); 503 + } 504 + 505 + module_init(pcie_pme_service_init);
+28
drivers/pci/pcie/pme/pcie_pme.h
··· 1 + /* 2 + * drivers/pci/pcie/pme/pcie_pme.h 3 + * 4 + * PCI Express Root Port PME signaling support 5 + * 6 + * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 7 + */ 8 + 9 + #ifndef _PCIE_PME_H_ 10 + #define _PCIE_PME_H_ 11 + 12 + struct pcie_device; 13 + 14 + #ifdef CONFIG_ACPI 15 + extern int pcie_pme_acpi_setup(struct pcie_device *srv); 16 + 17 + static inline int pcie_pme_platform_notify(struct pcie_device *srv) 18 + { 19 + return pcie_pme_acpi_setup(srv); 20 + } 21 + #else /* !CONFIG_ACPI */ 22 + static inline int pcie_pme_platform_notify(struct pcie_device *srv) 23 + { 24 + return 0; 25 + } 26 + #endif /* !CONFIG_ACPI */ 27 + 28 + #endif
+54
drivers/pci/pcie/pme/pcie_pme_acpi.c
··· 1 + /* 2 + * PCIe Native PME support, ACPI-related part 3 + * 4 + * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 5 + * 6 + * This file is subject to the terms and conditions of the GNU General Public 7 + * License V2. See the file "COPYING" in the main directory of this archive 8 + * for more details. 9 + */ 10 + 11 + #include <linux/pci.h> 12 + #include <linux/kernel.h> 13 + #include <linux/errno.h> 14 + #include <linux/acpi.h> 15 + #include <linux/pci-acpi.h> 16 + #include <linux/pcieport_if.h> 17 + 18 + /** 19 + * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME. 20 + * @srv - PCIe PME service for a root port or event collector. 21 + * 22 + * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid 23 + * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME 24 + * control to the kernel. 25 + */ 26 + int pcie_pme_acpi_setup(struct pcie_device *srv) 27 + { 28 + acpi_status status = AE_NOT_FOUND; 29 + struct pci_dev *port = srv->port; 30 + acpi_handle handle; 31 + int error = 0; 32 + 33 + if (acpi_pci_disabled) 34 + return -ENOSYS; 35 + 36 + dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n"); 37 + 38 + handle = acpi_find_root_bridge_handle(port); 39 + if (!handle) 40 + return -EINVAL; 41 + 42 + status = acpi_pci_osc_control_set(handle, 43 + OSC_PCI_EXPRESS_PME_CONTROL | 44 + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); 45 + if (ACPI_FAILURE(status)) { 46 + dev_info(&port->dev, 47 + "Failed to receive control of PCIe PME service: %s\n", 48 + (status == AE_SUPPORT || status == AE_NOT_FOUND) ? 49 + "no _OSC support" : "ACPI _OSC failed"); 50 + error = -ENODEV; 51 + } 52 + 53 + return error; 54 + }
+17
drivers/pci/pcie/portdrv.h
··· 30 30 extern int __must_check pcie_port_bus_register(void); 31 31 extern void pcie_port_bus_unregister(void); 32 32 33 + #ifdef CONFIG_PCIE_PME 34 + extern bool pcie_pme_msi_disabled; 35 + 36 + static inline void pcie_pme_disable_msi(void) 37 + { 38 + pcie_pme_msi_disabled = true; 39 + } 40 + 41 + static inline bool pcie_pme_no_msi(void) 42 + { 43 + return pcie_pme_msi_disabled; 44 + } 45 + #else /* !CONFIG_PCIE_PME */ 46 + static inline void pcie_pme_disable_msi(void) {} 47 + static inline bool pcie_pme_no_msi(void) { return false; } 48 + #endif /* !CONFIG_PCIE_PME */ 49 + 33 50 #endif /* _PORTDRV_H_ */
+10 -2
drivers/pci/pcie/portdrv_core.c
··· 186 186 */ 187 187 static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) 188 188 { 189 - int i, irq; 189 + int i, irq = -1; 190 + 191 + /* We have to use INTx if MSI cannot be used for PCIe PME. */ 192 + if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) { 193 + if (dev->pin) 194 + irq = dev->irq; 195 + goto no_msi; 196 + } 190 197 191 198 /* Try to use MSI-X if supported */ 192 199 if (!pcie_port_enable_msix(dev, irqs, mask)) 193 200 return 0; 201 + 194 202 /* We're not going to use MSI-X, so try MSI and fall back to INTx */ 195 - irq = -1; 196 203 if (!pci_enable_msi(dev) || dev->pin) 197 204 irq = dev->irq; 198 205 206 + no_msi: 199 207 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) 200 208 irqs[i] = irq; 201 209 irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
+27
drivers/pci/pcie/portdrv_pci.c
··· 15 15 #include <linux/slab.h> 16 16 #include <linux/pcieport_if.h> 17 17 #include <linux/aer.h> 18 + #include <linux/dmi.h> 18 19 19 20 #include "portdrv.h" 20 21 #include "aer/aerdrv.h" ··· 274 273 .driver.pm = PCIE_PORTDRV_PM_OPS, 275 274 }; 276 275 276 + static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) 277 + { 278 + pr_notice("%s detected: will not use MSI for PCIe PME signaling\n", 279 + d->ident); 280 + pcie_pme_disable_msi(); 281 + return 0; 282 + } 283 + 284 + static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { 285 + /* 286 + * Boxes that should not use MSI for PCIe PME signaling. 287 + */ 288 + { 289 + .callback = dmi_pcie_pme_disable_msi, 290 + .ident = "MSI Wind U-100", 291 + .matches = { 292 + DMI_MATCH(DMI_SYS_VENDOR, 293 + "MICRO-STAR INTERNATIONAL CO., LTD"), 294 + DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), 295 + }, 296 + }, 297 + {} 298 + }; 299 + 277 300 static int __init pcie_portdrv_init(void) 278 301 { 279 302 int retval; 303 + 304 + dmi_check_system(pcie_portdrv_dmi_table); 280 305 281 306 retval = pcie_port_bus_register(); 282 307 if (retval) {
+264 -28
drivers/pci/probe.c
··· 89 89 90 90 if (pci_bus->bridge) 91 91 put_device(pci_bus->bridge); 92 + pci_bus_remove_resources(pci_bus); 92 93 kfree(pci_bus); 93 94 } 94 95 ··· 282 281 } 283 282 } 284 283 285 - void __devinit pci_read_bridge_bases(struct pci_bus *child) 284 + static void __devinit pci_read_bridge_io(struct pci_bus *child) 286 285 { 287 286 struct pci_dev *dev = child->self; 288 287 u8 io_base_lo, io_limit_lo; 289 - u16 mem_base_lo, mem_limit_lo; 290 288 unsigned long base, limit; 291 289 struct resource *res; 292 - int i; 293 - 294 - if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ 295 - return; 296 - 297 - dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", 298 - child->secondary, child->subordinate, 299 - dev->transparent ? " (subtractive decode)": ""); 300 - 301 - if (dev->transparent) { 302 - for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) 303 - child->resource[i] = child->parent->resource[i - 3]; 304 - } 305 290 306 291 res = child->resource[0]; 307 292 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); ··· 303 316 limit |= (io_limit_hi << 16); 304 317 } 305 318 306 - if (base <= limit) { 319 + if (base && base <= limit) { 307 320 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; 308 321 if (!res->start) 309 322 res->start = base; 310 323 if (!res->end) 311 324 res->end = limit + 0xfff; 312 325 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 326 + } else { 327 + dev_printk(KERN_DEBUG, &dev->dev, 328 + " bridge window [io %04lx - %04lx] reg reading\n", 329 + base, limit); 313 330 } 331 + } 332 + 333 + static void __devinit pci_read_bridge_mmio(struct pci_bus *child) 334 + { 335 + struct pci_dev *dev = child->self; 336 + u16 mem_base_lo, mem_limit_lo; 337 + unsigned long base, limit; 338 + struct resource *res; 314 339 315 340 res = child->resource[1]; 316 341 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); 317 342 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); 318 343 base = (mem_base_lo & 
PCI_MEMORY_RANGE_MASK) << 16; 319 344 limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; 320 - if (base <= limit) { 345 + if (base && base <= limit) { 321 346 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; 322 347 res->start = base; 323 348 res->end = limit + 0xfffff; 324 349 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 350 + } else { 351 + dev_printk(KERN_DEBUG, &dev->dev, 352 + " bridge window [mem 0x%08lx - 0x%08lx] reg reading\n", 353 + base, limit + 0xfffff); 325 354 } 355 + } 356 + 357 + static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child) 358 + { 359 + struct pci_dev *dev = child->self; 360 + u16 mem_base_lo, mem_limit_lo; 361 + unsigned long base, limit; 362 + struct resource *res; 326 363 327 364 res = child->resource[2]; 328 365 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); ··· 377 366 #endif 378 367 } 379 368 } 380 - if (base <= limit) { 369 + if (base && base <= limit) { 381 370 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | 382 371 IORESOURCE_MEM | IORESOURCE_PREFETCH; 383 372 if (res->flags & PCI_PREF_RANGE_TYPE_64) ··· 385 374 res->start = base; 386 375 res->end = limit + 0xfffff; 387 376 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res); 377 + } else { 378 + dev_printk(KERN_DEBUG, &dev->dev, 379 + " bridge window [mem 0x%08lx - %08lx pref] reg reading\n", 380 + base, limit + 0xfffff); 381 + } 382 + } 383 + 384 + void __devinit pci_read_bridge_bases(struct pci_bus *child) 385 + { 386 + struct pci_dev *dev = child->self; 387 + struct resource *res; 388 + int i; 389 + 390 + if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ 391 + return; 392 + 393 + dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n", 394 + child->secondary, child->subordinate, 395 + dev->transparent ? 
" (subtractive decode)" : ""); 396 + 397 + pci_bus_remove_resources(child); 398 + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) 399 + child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; 400 + 401 + pci_read_bridge_io(child); 402 + pci_read_bridge_mmio(child); 403 + pci_read_bridge_mmio_pref(child); 404 + 405 + if (dev->transparent) { 406 + pci_bus_for_each_resource(child->parent, res, i) { 407 + if (res) { 408 + pci_bus_add_resource(child, res, 409 + PCI_SUBTRACTIVE_DECODE); 410 + dev_printk(KERN_DEBUG, &dev->dev, 411 + " bridge window %pR (subtractive decode)\n", 412 + res); 413 + } 414 + } 388 415 } 389 416 } 390 417 ··· 436 387 INIT_LIST_HEAD(&b->children); 437 388 INIT_LIST_HEAD(&b->devices); 438 389 INIT_LIST_HEAD(&b->slots); 390 + INIT_LIST_HEAD(&b->resources); 391 + b->max_bus_speed = PCI_SPEED_UNKNOWN; 392 + b->cur_bus_speed = PCI_SPEED_UNKNOWN; 439 393 } 440 394 return b; 441 395 } 396 + 397 + static unsigned char pcix_bus_speed[] = { 398 + PCI_SPEED_UNKNOWN, /* 0 */ 399 + PCI_SPEED_66MHz_PCIX, /* 1 */ 400 + PCI_SPEED_100MHz_PCIX, /* 2 */ 401 + PCI_SPEED_133MHz_PCIX, /* 3 */ 402 + PCI_SPEED_UNKNOWN, /* 4 */ 403 + PCI_SPEED_66MHz_PCIX_ECC, /* 5 */ 404 + PCI_SPEED_100MHz_PCIX_ECC, /* 6 */ 405 + PCI_SPEED_133MHz_PCIX_ECC, /* 7 */ 406 + PCI_SPEED_UNKNOWN, /* 8 */ 407 + PCI_SPEED_66MHz_PCIX_266, /* 9 */ 408 + PCI_SPEED_100MHz_PCIX_266, /* A */ 409 + PCI_SPEED_133MHz_PCIX_266, /* B */ 410 + PCI_SPEED_UNKNOWN, /* C */ 411 + PCI_SPEED_66MHz_PCIX_533, /* D */ 412 + PCI_SPEED_100MHz_PCIX_533, /* E */ 413 + PCI_SPEED_133MHz_PCIX_533 /* F */ 414 + }; 415 + 416 + static unsigned char pcie_link_speed[] = { 417 + PCI_SPEED_UNKNOWN, /* 0 */ 418 + PCIE_SPEED_2_5GT, /* 1 */ 419 + PCIE_SPEED_5_0GT, /* 2 */ 420 + PCIE_SPEED_8_0GT, /* 3 */ 421 + PCI_SPEED_UNKNOWN, /* 4 */ 422 + PCI_SPEED_UNKNOWN, /* 5 */ 423 + PCI_SPEED_UNKNOWN, /* 6 */ 424 + PCI_SPEED_UNKNOWN, /* 7 */ 425 + PCI_SPEED_UNKNOWN, /* 8 */ 426 + PCI_SPEED_UNKNOWN, /* 9 */ 427 + PCI_SPEED_UNKNOWN, /* A */ 
428 + PCI_SPEED_UNKNOWN, /* B */ 429 + PCI_SPEED_UNKNOWN, /* C */ 430 + PCI_SPEED_UNKNOWN, /* D */ 431 + PCI_SPEED_UNKNOWN, /* E */ 432 + PCI_SPEED_UNKNOWN /* F */ 433 + }; 434 + 435 + void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) 436 + { 437 + bus->cur_bus_speed = pcie_link_speed[linksta & 0xf]; 438 + } 439 + EXPORT_SYMBOL_GPL(pcie_update_link_speed); 440 + 441 + static unsigned char agp_speeds[] = { 442 + AGP_UNKNOWN, 443 + AGP_1X, 444 + AGP_2X, 445 + AGP_4X, 446 + AGP_8X 447 + }; 448 + 449 + static enum pci_bus_speed agp_speed(int agp3, int agpstat) 450 + { 451 + int index = 0; 452 + 453 + if (agpstat & 4) 454 + index = 3; 455 + else if (agpstat & 2) 456 + index = 2; 457 + else if (agpstat & 1) 458 + index = 1; 459 + else 460 + goto out; 461 + 462 + if (agp3) { 463 + index += 2; 464 + if (index == 5) 465 + index = 0; 466 + } 467 + 468 + out: 469 + return agp_speeds[index]; 470 + } 471 + 472 + 473 + static void pci_set_bus_speed(struct pci_bus *bus) 474 + { 475 + struct pci_dev *bridge = bus->self; 476 + int pos; 477 + 478 + pos = pci_find_capability(bridge, PCI_CAP_ID_AGP); 479 + if (!pos) 480 + pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3); 481 + if (pos) { 482 + u32 agpstat, agpcmd; 483 + 484 + pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat); 485 + bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7); 486 + 487 + pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd); 488 + bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7); 489 + } 490 + 491 + pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); 492 + if (pos) { 493 + u16 status; 494 + enum pci_bus_speed max; 495 + pci_read_config_word(bridge, pos + 2, &status); 496 + 497 + if (status & 0x8000) { 498 + max = PCI_SPEED_133MHz_PCIX_533; 499 + } else if (status & 0x4000) { 500 + max = PCI_SPEED_133MHz_PCIX_266; 501 + } else if (status & 0x0002) { 502 + if (((status >> 12) & 0x3) == 2) { 503 + max = PCI_SPEED_133MHz_PCIX_ECC; 504 + } else { 505 + max = 
PCI_SPEED_133MHz_PCIX; 506 + } 507 + } else { 508 + max = PCI_SPEED_66MHz_PCIX; 509 + } 510 + 511 + bus->max_bus_speed = max; 512 + bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf]; 513 + 514 + return; 515 + } 516 + 517 + pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); 518 + if (pos) { 519 + u32 linkcap; 520 + u16 linksta; 521 + 522 + pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); 523 + bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; 524 + 525 + pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); 526 + pcie_update_link_speed(bus, linksta); 527 + } 528 + } 529 + 442 530 443 531 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, 444 532 struct pci_dev *bridge, int busnr) ··· 615 429 616 430 child->self = bridge; 617 431 child->bridge = get_device(&bridge->dev); 432 + 433 + pci_set_bus_speed(child); 618 434 619 435 /* Set up default resource pointers and names.. */ 620 436 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { ··· 1269 1081 } 1270 1082 EXPORT_SYMBOL(pci_scan_single_device); 1271 1083 1084 + static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn) 1085 + { 1086 + u16 cap; 1087 + unsigned pos, next_fn; 1088 + 1089 + if (!dev) 1090 + return 0; 1091 + 1092 + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 1093 + if (!pos) 1094 + return 0; 1095 + pci_read_config_word(dev, pos + 4, &cap); 1096 + next_fn = cap >> 8; 1097 + if (next_fn <= fn) 1098 + return 0; 1099 + return next_fn; 1100 + } 1101 + 1102 + static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn) 1103 + { 1104 + return (fn + 1) % 8; 1105 + } 1106 + 1107 + static unsigned no_next_fn(struct pci_dev *dev, unsigned fn) 1108 + { 1109 + return 0; 1110 + } 1111 + 1112 + static int only_one_child(struct pci_bus *bus) 1113 + { 1114 + struct pci_dev *parent = bus->self; 1115 + if (!parent || !pci_is_pcie(parent)) 1116 + return 0; 1117 + if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT || 1118 + parent->pcie_type == 
PCI_EXP_TYPE_DOWNSTREAM) 1119 + return 1; 1120 + return 0; 1121 + } 1122 + 1272 1123 /** 1273 1124 * pci_scan_slot - scan a PCI slot on a bus for devices. 1274 1125 * @bus: PCI bus to scan ··· 1321 1094 */ 1322 1095 int pci_scan_slot(struct pci_bus *bus, int devfn) 1323 1096 { 1324 - int fn, nr = 0; 1097 + unsigned fn, nr = 0; 1325 1098 struct pci_dev *dev; 1099 + unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn; 1100 + 1101 + if (only_one_child(bus) && (devfn > 0)) 1102 + return 0; /* Already scanned the entire slot */ 1326 1103 1327 1104 dev = pci_scan_single_device(bus, devfn); 1328 - if (dev && !dev->is_added) /* new device? */ 1105 + if (!dev) 1106 + return 0; 1107 + if (!dev->is_added) 1329 1108 nr++; 1330 1109 1331 - if (dev && dev->multifunction) { 1332 - for (fn = 1; fn < 8; fn++) { 1333 - dev = pci_scan_single_device(bus, devfn + fn); 1334 - if (dev) { 1335 - if (!dev->is_added) 1336 - nr++; 1337 - dev->multifunction = 1; 1338 - } 1110 + if (pci_ari_enabled(bus)) 1111 + next_fn = next_ari_fn; 1112 + else if (dev->multifunction) 1113 + next_fn = next_trad_fn; 1114 + 1115 + for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) { 1116 + dev = pci_scan_single_device(bus, devfn + fn); 1117 + if (dev) { 1118 + if (!dev->is_added) 1119 + nr++; 1120 + dev->multifunction = 1; 1339 1121 } 1340 1122 } 1341 1123
+2 -12
drivers/pci/quirks.c
··· 25 25 #include <linux/dmi.h> 26 26 #include <linux/pci-aspm.h> 27 27 #include <linux/ioport.h> 28 + #include <asm/dma.h> /* isa_dma_bridge_buggy */ 28 29 #include "pci.h" 29 30 30 - int isa_dma_bridge_buggy; 31 - EXPORT_SYMBOL(isa_dma_bridge_buggy); 32 - int pci_pci_problems; 33 - EXPORT_SYMBOL(pci_pci_problems); 34 - 35 - #ifdef CONFIG_PCI_QUIRKS 36 31 /* 37 32 * This quirk function disables memory decoding and releases memory resources 38 33 * of the device specified by kernel's boot parameter 'pci=resource_alignment='. ··· 2607 2612 } 2608 2613 pci_do_fixups(dev, start, end); 2609 2614 } 2615 + EXPORT_SYMBOL(pci_fixup_device); 2610 2616 2611 2617 static int __init pci_apply_final_quirks(void) 2612 2618 { ··· 2719 2723 2720 2724 return -ENOTTY; 2721 2725 } 2722 - 2723 - #else 2724 - void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} 2725 - int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; } 2726 - #endif 2727 - EXPORT_SYMBOL(pci_fixup_device);
+456 -54
drivers/pci/setup-bus.c
··· 27 27 #include <linux/slab.h> 28 28 #include "pci.h" 29 29 30 - static void pbus_assign_resources_sorted(const struct pci_bus *bus) 31 - { 32 - struct pci_dev *dev; 30 + struct resource_list_x { 31 + struct resource_list_x *next; 33 32 struct resource *res; 34 - struct resource_list head, *list, *tmp; 35 - int idx; 33 + struct pci_dev *dev; 34 + resource_size_t start; 35 + resource_size_t end; 36 + unsigned long flags; 37 + }; 36 38 37 - head.next = NULL; 38 - list_for_each_entry(dev, &bus->devices, bus_list) { 39 - u16 class = dev->class >> 8; 39 + static void add_to_failed_list(struct resource_list_x *head, 40 + struct pci_dev *dev, struct resource *res) 41 + { 42 + struct resource_list_x *list = head; 43 + struct resource_list_x *ln = list->next; 44 + struct resource_list_x *tmp; 40 45 41 - /* Don't touch classless devices or host bridges or ioapics. */ 42 - if (class == PCI_CLASS_NOT_DEFINED || 43 - class == PCI_CLASS_BRIDGE_HOST) 44 - continue; 45 - 46 - /* Don't touch ioapic devices already enabled by firmware */ 47 - if (class == PCI_CLASS_SYSTEM_PIC) { 48 - u16 command; 49 - pci_read_config_word(dev, PCI_COMMAND, &command); 50 - if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) 51 - continue; 52 - } 53 - 54 - pdev_sort_resources(dev, &head); 46 + tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); 47 + if (!tmp) { 48 + pr_warning("add_to_failed_list: kmalloc() failed!\n"); 49 + return; 55 50 } 56 51 57 - for (list = head.next; list;) { 52 + tmp->next = ln; 53 + tmp->res = res; 54 + tmp->dev = dev; 55 + tmp->start = res->start; 56 + tmp->end = res->end; 57 + tmp->flags = res->flags; 58 + list->next = tmp; 59 + } 60 + 61 + static void free_failed_list(struct resource_list_x *head) 62 + { 63 + struct resource_list_x *list, *tmp; 64 + 65 + for (list = head->next; list;) { 66 + tmp = list; 67 + list = list->next; 68 + kfree(tmp); 69 + } 70 + 71 + head->next = NULL; 72 + } 73 + 74 + static void __dev_sort_resources(struct pci_dev *dev, 75 + struct resource_list 
*head) 76 + { 77 + u16 class = dev->class >> 8; 78 + 79 + /* Don't touch classless devices or host bridges or ioapics. */ 80 + if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) 81 + return; 82 + 83 + /* Don't touch ioapic devices already enabled by firmware */ 84 + if (class == PCI_CLASS_SYSTEM_PIC) { 85 + u16 command; 86 + pci_read_config_word(dev, PCI_COMMAND, &command); 87 + if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) 88 + return; 89 + } 90 + 91 + pdev_sort_resources(dev, head); 92 + } 93 + 94 + static void __assign_resources_sorted(struct resource_list *head, 95 + struct resource_list_x *fail_head) 96 + { 97 + struct resource *res; 98 + struct resource_list *list, *tmp; 99 + int idx; 100 + 101 + for (list = head->next; list;) { 58 102 res = list->res; 59 103 idx = res - &list->dev->resource[0]; 60 104 if (pci_assign_resource(list->dev, idx)) { 105 + if (fail_head && !pci_is_root_bus(list->dev->bus)) 106 + add_to_failed_list(fail_head, list->dev, res); 61 107 res->start = 0; 62 108 res->end = 0; 63 109 res->flags = 0; ··· 112 66 list = list->next; 113 67 kfree(tmp); 114 68 } 69 + } 70 + 71 + static void pdev_assign_resources_sorted(struct pci_dev *dev, 72 + struct resource_list_x *fail_head) 73 + { 74 + struct resource_list head; 75 + 76 + head.next = NULL; 77 + __dev_sort_resources(dev, &head); 78 + __assign_resources_sorted(&head, fail_head); 79 + 80 + } 81 + 82 + static void pbus_assign_resources_sorted(const struct pci_bus *bus, 83 + struct resource_list_x *fail_head) 84 + { 85 + struct pci_dev *dev; 86 + struct resource_list head; 87 + 88 + head.next = NULL; 89 + list_for_each_entry(dev, &bus->devices, bus_list) 90 + __dev_sort_resources(dev, &head); 91 + 92 + __assign_resources_sorted(&head, fail_head); 115 93 } 116 94 117 95 void pci_setup_cardbus(struct pci_bus *bus) ··· 204 134 config space writes, so it's quite possible that an I/O window of 205 135 the bridge will have some undesirable address (e.g. 
0) after the 206 136 first write. Ditto 64-bit prefetchable MMIO. */ 207 - static void pci_setup_bridge(struct pci_bus *bus) 137 + static void pci_setup_bridge_io(struct pci_bus *bus) 208 138 { 209 139 struct pci_dev *bridge = bus->self; 210 140 struct resource *res; 211 141 struct pci_bus_region region; 212 - u32 l, bu, lu, io_upper16; 213 - 214 - if (pci_is_enabled(bridge)) 215 - return; 216 - 217 - dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", 218 - bus->secondary, bus->subordinate); 142 + u32 l, io_upper16; 219 143 220 144 /* Set up the top and bottom of the PCI I/O segment for this bus. */ 221 145 res = bus->resource[0]; ··· 222 158 /* Set up upper 16 bits of I/O base/limit. */ 223 159 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); 224 160 dev_info(&bridge->dev, " bridge window %pR\n", res); 225 - } 226 - else { 161 + } else { 227 162 /* Clear upper 16 bits of I/O base/limit. */ 228 163 io_upper16 = 0; 229 164 l = 0x00f0; ··· 234 171 pci_write_config_dword(bridge, PCI_IO_BASE, l); 235 172 /* Update upper 16 bits of I/O base/limit. */ 236 173 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); 174 + } 237 175 238 - /* Set up the top and bottom of the PCI Memory segment 239 - for this bus. */ 176 + static void pci_setup_bridge_mmio(struct pci_bus *bus) 177 + { 178 + struct pci_dev *bridge = bus->self; 179 + struct resource *res; 180 + struct pci_bus_region region; 181 + u32 l; 182 + 183 + /* Set up the top and bottom of the PCI Memory segment for this bus. 
*/ 240 184 res = bus->resource[1]; 241 185 pcibios_resource_to_bus(bridge, &region, res); 242 186 if (res->flags & IORESOURCE_MEM) { 243 187 l = (region.start >> 16) & 0xfff0; 244 188 l |= region.end & 0xfff00000; 245 189 dev_info(&bridge->dev, " bridge window %pR\n", res); 246 - } 247 - else { 190 + } else { 248 191 l = 0x0000fff0; 249 192 dev_info(&bridge->dev, " bridge window [mem disabled]\n"); 250 193 } 251 194 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); 195 + } 196 + 197 + static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) 198 + { 199 + struct pci_dev *bridge = bus->self; 200 + struct resource *res; 201 + struct pci_bus_region region; 202 + u32 l, bu, lu; 252 203 253 204 /* Clear out the upper 32 bits of PREF limit. 254 205 If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily ··· 281 204 lu = upper_32_bits(region.end); 282 205 } 283 206 dev_info(&bridge->dev, " bridge window %pR\n", res); 284 - } 285 - else { 207 + } else { 286 208 l = 0x0000fff0; 287 209 dev_info(&bridge->dev, " bridge window [mem pref disabled]\n"); 288 210 } ··· 290 214 /* Set the upper 32 bits of PREF base & limit. 
*/ 291 215 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); 292 216 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); 217 + } 218 + 219 + static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) 220 + { 221 + struct pci_dev *bridge = bus->self; 222 + 223 + dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", 224 + bus->secondary, bus->subordinate); 225 + 226 + if (type & IORESOURCE_IO) 227 + pci_setup_bridge_io(bus); 228 + 229 + if (type & IORESOURCE_MEM) 230 + pci_setup_bridge_mmio(bus); 231 + 232 + if (type & IORESOURCE_PREFETCH) 233 + pci_setup_bridge_mmio_pref(bus); 293 234 294 235 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); 236 + } 237 + 238 + static void pci_setup_bridge(struct pci_bus *bus) 239 + { 240 + unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | 241 + IORESOURCE_PREFETCH; 242 + 243 + __pci_setup_bridge(bus, type); 295 244 } 296 245 297 246 /* Check whether the bridge supports optional I/O and ··· 354 253 } 355 254 if (pmem) { 356 255 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; 357 - if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) 256 + if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == 257 + PCI_PREF_RANGE_TYPE_64) { 358 258 b_res[2].flags |= IORESOURCE_MEM_64; 259 + b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; 260 + } 359 261 } 360 262 361 263 /* double check if bridge does support 64 bit pref */ ··· 387 283 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | 388 284 IORESOURCE_PREFETCH; 389 285 390 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 391 - r = bus->resource[i]; 286 + pci_bus_for_each_resource(bus, r, i) { 392 287 if (r == &ioport_resource || r == &iomem_resource) 393 288 continue; 394 289 if (r && (r->flags & type_mask) == type && !r->parent) ··· 404 301 { 405 302 struct pci_dev *dev; 406 303 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 407 - unsigned long size = 0, size1 = 0; 304 + unsigned long size = 0, size1 = 0, 
old_size; 408 305 409 306 if (!b_res) 410 307 return; ··· 429 326 } 430 327 if (size < min_size) 431 328 size = min_size; 329 + old_size = resource_size(b_res); 330 + if (old_size == 1) 331 + old_size = 0; 432 332 /* To be fixed in 2.5: we should have sort of HAVE_ISA 433 333 flag in the struct pci_bus. */ 434 334 #if defined(CONFIG_ISA) || defined(CONFIG_EISA) 435 335 size = (size & 0xff) + ((size & ~0xffUL) << 2); 436 336 #endif 437 337 size = ALIGN(size + size1, 4096); 338 + if (size < old_size) 339 + size = old_size; 438 340 if (!size) { 439 341 if (b_res->start || b_res->end) 440 342 dev_info(&bus->self->dev, "disabling bridge window " ··· 460 352 unsigned long type, resource_size_t min_size) 461 353 { 462 354 struct pci_dev *dev; 463 - resource_size_t min_align, align, size; 355 + resource_size_t min_align, align, size, old_size; 464 356 resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ 465 357 int order, max_order; 466 358 struct resource *b_res = find_free_bus_resource(bus, type); ··· 510 402 } 511 403 if (size < min_size) 512 404 size = min_size; 405 + old_size = resource_size(b_res); 406 + if (old_size == 1) 407 + old_size = 0; 408 + if (size < old_size) 409 + size = old_size; 513 410 514 411 align = 0; 515 412 min_align = 0; ··· 651 538 } 652 539 EXPORT_SYMBOL(pci_bus_size_bridges); 653 540 654 - void __ref pci_bus_assign_resources(const struct pci_bus *bus) 541 + static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, 542 + struct resource_list_x *fail_head) 655 543 { 656 544 struct pci_bus *b; 657 545 struct pci_dev *dev; 658 546 659 - pbus_assign_resources_sorted(bus); 547 + pbus_assign_resources_sorted(bus, fail_head); 660 548 661 549 list_for_each_entry(dev, &bus->devices, bus_list) { 662 550 b = dev->subordinate; 663 551 if (!b) 664 552 continue; 665 553 666 - pci_bus_assign_resources(b); 554 + __pci_bus_assign_resources(b, fail_head); 667 555 668 556 switch (dev->class >> 8) { 669 557 case PCI_CLASS_BRIDGE_PCI: 670 - 
pci_setup_bridge(b); 558 + if (!pci_is_enabled(dev)) 559 + pci_setup_bridge(b); 671 560 break; 672 561 673 562 case PCI_CLASS_BRIDGE_CARDBUS: ··· 683 568 } 684 569 } 685 570 } 571 + 572 + void __ref pci_bus_assign_resources(const struct pci_bus *bus) 573 + { 574 + __pci_bus_assign_resources(bus, NULL); 575 + } 686 576 EXPORT_SYMBOL(pci_bus_assign_resources); 577 + 578 + static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge, 579 + struct resource_list_x *fail_head) 580 + { 581 + struct pci_bus *b; 582 + 583 + pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head); 584 + 585 + b = bridge->subordinate; 586 + if (!b) 587 + return; 588 + 589 + __pci_bus_assign_resources(b, fail_head); 590 + 591 + switch (bridge->class >> 8) { 592 + case PCI_CLASS_BRIDGE_PCI: 593 + pci_setup_bridge(b); 594 + break; 595 + 596 + case PCI_CLASS_BRIDGE_CARDBUS: 597 + pci_setup_cardbus(b); 598 + break; 599 + 600 + default: 601 + dev_info(&bridge->dev, "not setting up bridge for bus " 602 + "%04x:%02x\n", pci_domain_nr(b), b->number); 603 + break; 604 + } 605 + } 606 + static void pci_bridge_release_resources(struct pci_bus *bus, 607 + unsigned long type) 608 + { 609 + int idx; 610 + bool changed = false; 611 + struct pci_dev *dev; 612 + struct resource *r; 613 + unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | 614 + IORESOURCE_PREFETCH; 615 + 616 + dev = bus->self; 617 + for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END; 618 + idx++) { 619 + r = &dev->resource[idx]; 620 + if ((r->flags & type_mask) != type) 621 + continue; 622 + if (!r->parent) 623 + continue; 624 + /* 625 + * if there are children under that, we should release them 626 + * all 627 + */ 628 + release_child_resources(r); 629 + if (!release_resource(r)) { 630 + dev_printk(KERN_DEBUG, &dev->dev, 631 + "resource %d %pR released\n", idx, r); 632 + /* keep the old size */ 633 + r->end = resource_size(r) - 1; 634 + r->start = 0; 635 + r->flags = 0; 636 + changed = true; 637 + 
} 638 + } 639 + 640 + if (changed) { 641 + /* avoiding touch the one without PREF */ 642 + if (type & IORESOURCE_PREFETCH) 643 + type = IORESOURCE_PREFETCH; 644 + __pci_setup_bridge(bus, type); 645 + } 646 + } 647 + 648 + enum release_type { 649 + leaf_only, 650 + whole_subtree, 651 + }; 652 + /* 653 + * try to release pci bridge resources that is from leaf bridge, 654 + * so we can allocate big new one later 655 + */ 656 + static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus, 657 + unsigned long type, 658 + enum release_type rel_type) 659 + { 660 + struct pci_dev *dev; 661 + bool is_leaf_bridge = true; 662 + 663 + list_for_each_entry(dev, &bus->devices, bus_list) { 664 + struct pci_bus *b = dev->subordinate; 665 + if (!b) 666 + continue; 667 + 668 + is_leaf_bridge = false; 669 + 670 + if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) 671 + continue; 672 + 673 + if (rel_type == whole_subtree) 674 + pci_bus_release_bridge_resources(b, type, 675 + whole_subtree); 676 + } 677 + 678 + if (pci_is_root_bus(bus)) 679 + return; 680 + 681 + if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI) 682 + return; 683 + 684 + if ((rel_type == whole_subtree) || is_leaf_bridge) 685 + pci_bridge_release_resources(bus, type); 686 + } 687 687 688 688 static void pci_bus_dump_res(struct pci_bus *bus) 689 689 { 690 - int i; 690 + struct resource *res; 691 + int i; 691 692 692 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 693 - struct resource *res = bus->resource[i]; 694 - if (!res || !res->end) 693 + pci_bus_for_each_resource(bus, res, i) { 694 + if (!res || !res->end || !res->flags) 695 695 continue; 696 696 697 697 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); ··· 830 600 } 831 601 } 832 602 603 + static int __init pci_bus_get_depth(struct pci_bus *bus) 604 + { 605 + int depth = 0; 606 + struct pci_dev *dev; 607 + 608 + list_for_each_entry(dev, &bus->devices, bus_list) { 609 + int ret; 610 + struct pci_bus *b = dev->subordinate; 611 + if (!b) 612 + 
continue; 613 + 614 + ret = pci_bus_get_depth(b); 615 + if (ret + 1 > depth) 616 + depth = ret + 1; 617 + } 618 + 619 + return depth; 620 + } 621 + static int __init pci_get_max_depth(void) 622 + { 623 + int depth = 0; 624 + struct pci_bus *bus; 625 + 626 + list_for_each_entry(bus, &pci_root_buses, node) { 627 + int ret; 628 + 629 + ret = pci_bus_get_depth(bus); 630 + if (ret > depth) 631 + depth = ret; 632 + } 633 + 634 + return depth; 635 + } 636 + 637 + /* 638 + * first try will not touch pci bridge res 639 + * second and later try will clear small leaf bridge res 640 + * will stop till to the max deepth if can not find good one 641 + */ 833 642 void __init 834 643 pci_assign_unassigned_resources(void) 835 644 { 836 645 struct pci_bus *bus; 646 + int tried_times = 0; 647 + enum release_type rel_type = leaf_only; 648 + struct resource_list_x head, *list; 649 + unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | 650 + IORESOURCE_PREFETCH; 651 + unsigned long failed_type; 652 + int max_depth = pci_get_max_depth(); 653 + int pci_try_num; 837 654 655 + head.next = NULL; 656 + 657 + pci_try_num = max_depth + 1; 658 + printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", 659 + max_depth, pci_try_num); 660 + 661 + again: 838 662 /* Depth first, calculate sizes and alignments of all 839 663 subordinate buses. */ 840 664 list_for_each_entry(bus, &pci_root_buses, node) { ··· 896 612 } 897 613 /* Depth last, allocate resources and update the hardware. */ 898 614 list_for_each_entry(bus, &pci_root_buses, node) { 899 - pci_bus_assign_resources(bus); 900 - pci_enable_bridges(bus); 615 + __pci_bus_assign_resources(bus, &head); 901 616 } 617 + tried_times++; 618 + 619 + /* any device complain? 
*/ 620 + if (!head.next) 621 + goto enable_and_dump; 622 + failed_type = 0; 623 + for (list = head.next; list;) { 624 + failed_type |= list->flags; 625 + list = list->next; 626 + } 627 + /* 628 + * io port are tight, don't try extra 629 + * or if reach the limit, don't want to try more 630 + */ 631 + failed_type &= type_mask; 632 + if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) { 633 + free_failed_list(&head); 634 + goto enable_and_dump; 635 + } 636 + 637 + printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", 638 + tried_times + 1); 639 + 640 + /* third times and later will not check if it is leaf */ 641 + if ((tried_times + 1) > 2) 642 + rel_type = whole_subtree; 643 + 644 + /* 645 + * Try to release leaf bridge's resources that doesn't fit resource of 646 + * child device under that bridge 647 + */ 648 + for (list = head.next; list;) { 649 + bus = list->dev->bus; 650 + pci_bus_release_bridge_resources(bus, list->flags & type_mask, 651 + rel_type); 652 + list = list->next; 653 + } 654 + /* restore size and flags */ 655 + for (list = head.next; list;) { 656 + struct resource *res = list->res; 657 + 658 + res->start = list->start; 659 + res->end = list->end; 660 + res->flags = list->flags; 661 + if (list->dev->subordinate) 662 + res->flags = 0; 663 + 664 + list = list->next; 665 + } 666 + free_failed_list(&head); 667 + 668 + goto again; 669 + 670 + enable_and_dump: 671 + /* Depth last, update the hardware. 
*/ 672 + list_for_each_entry(bus, &pci_root_buses, node) 673 + pci_enable_bridges(bus); 902 674 903 675 /* dump the resource on buses */ 904 676 list_for_each_entry(bus, &pci_root_buses, node) { 905 677 pci_bus_dump_resources(bus); 906 678 } 907 679 } 680 + 681 + void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) 682 + { 683 + struct pci_bus *parent = bridge->subordinate; 684 + int tried_times = 0; 685 + struct resource_list_x head, *list; 686 + int retval; 687 + unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | 688 + IORESOURCE_PREFETCH; 689 + 690 + head.next = NULL; 691 + 692 + again: 693 + pci_bus_size_bridges(parent); 694 + __pci_bridge_assign_resources(bridge, &head); 695 + retval = pci_reenable_device(bridge); 696 + pci_set_master(bridge); 697 + pci_enable_bridges(parent); 698 + 699 + tried_times++; 700 + 701 + if (!head.next) 702 + return; 703 + 704 + if (tried_times >= 2) { 705 + /* still fail, don't need to try more */ 706 + free_failed_list(&head); 707 + return; 708 + } 709 + 710 + printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", 711 + tried_times + 1); 712 + 713 + /* 714 + * Try to release leaf bridge's resources that doesn't fit resource of 715 + * child device under that bridge 716 + */ 717 + for (list = head.next; list;) { 718 + struct pci_bus *bus = list->dev->bus; 719 + unsigned long flags = list->flags; 720 + 721 + pci_bus_release_bridge_resources(bus, flags & type_mask, 722 + whole_subtree); 723 + list = list->next; 724 + } 725 + /* restore size and flags */ 726 + for (list = head.next; list;) { 727 + struct resource *res = list->res; 728 + 729 + res->start = list->start; 730 + res->end = list->end; 731 + res->flags = list->flags; 732 + if (list->dev->subordinate) 733 + res->flags = 0; 734 + 735 + list = list->next; 736 + } 737 + free_failed_list(&head); 738 + 739 + goto again; 740 + } 741 + EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
+55
drivers/pci/slot.c
··· 47 47 slot->number); 48 48 } 49 49 50 + /* these strings match up with the values in pci_bus_speed */ 51 + static char *pci_bus_speed_strings[] = { 52 + "33 MHz PCI", /* 0x00 */ 53 + "66 MHz PCI", /* 0x01 */ 54 + "66 MHz PCI-X", /* 0x02 */ 55 + "100 MHz PCI-X", /* 0x03 */ 56 + "133 MHz PCI-X", /* 0x04 */ 57 + NULL, /* 0x05 */ 58 + NULL, /* 0x06 */ 59 + NULL, /* 0x07 */ 60 + NULL, /* 0x08 */ 61 + "66 MHz PCI-X 266", /* 0x09 */ 62 + "100 MHz PCI-X 266", /* 0x0a */ 63 + "133 MHz PCI-X 266", /* 0x0b */ 64 + "Unknown AGP", /* 0x0c */ 65 + "1x AGP", /* 0x0d */ 66 + "2x AGP", /* 0x0e */ 67 + "4x AGP", /* 0x0f */ 68 + "8x AGP", /* 0x10 */ 69 + "66 MHz PCI-X 533", /* 0x11 */ 70 + "100 MHz PCI-X 533", /* 0x12 */ 71 + "133 MHz PCI-X 533", /* 0x13 */ 72 + "2.5 GT/s PCIe", /* 0x14 */ 73 + "5.0 GT/s PCIe", /* 0x15 */ 74 + "8.0 GT/s PCIe", /* 0x16 */ 75 + }; 76 + 77 + static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) 78 + { 79 + const char *speed_string; 80 + 81 + if (speed < ARRAY_SIZE(pci_bus_speed_strings)) 82 + speed_string = pci_bus_speed_strings[speed]; 83 + else 84 + speed_string = "Unknown"; 85 + 86 + return sprintf(buf, "%s\n", speed_string); 87 + } 88 + 89 + static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf) 90 + { 91 + return bus_speed_read(slot->bus->max_bus_speed, buf); 92 + } 93 + 94 + static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf) 95 + { 96 + return bus_speed_read(slot->bus->cur_bus_speed, buf); 97 + } 98 + 50 99 static void pci_slot_release(struct kobject *kobj) 51 100 { 52 101 struct pci_dev *dev; ··· 115 66 116 67 static struct pci_slot_attribute pci_slot_attr_address = 117 68 __ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL); 69 + static struct pci_slot_attribute pci_slot_attr_max_speed = 70 + __ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL); 71 + static struct pci_slot_attribute pci_slot_attr_cur_speed = 72 + __ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), 
cur_speed_read_file, NULL); 118 73 119 74 static struct attribute *pci_slot_default_attrs[] = { 120 75 &pci_slot_attr_address.attr, 76 + &pci_slot_attr_max_speed.attr, 77 + &pci_slot_attr_cur_speed.attr, 121 78 NULL, 122 79 }; 123 80
+7 -6
drivers/pcmcia/rsrc_mgr.c
··· 114 114 unsigned long offset; 115 115 }; 116 116 117 - static void pcmcia_align(void *align_data, struct resource *res, 118 - unsigned long size, unsigned long align) 117 + static resource_size_t pcmcia_align(void *align_data, 118 + const struct resource *res, 119 + resource_size_t size, resource_size_t align) 119 120 { 120 121 struct pcmcia_align_data *data = align_data; 121 - unsigned long start; 122 + resource_size_t start; 122 123 123 124 start = (res->start & ~data->mask) + data->offset; 124 125 if (start < res->start) 125 126 start += data->mask + 1; 126 - res->start = start; 127 127 128 128 #ifdef CONFIG_X86 129 129 if (res->flags & IORESOURCE_IO) { 130 130 if (start & 0x300) { 131 131 start = (start + 0x3ff) & ~0x3ff; 132 - res->start = start; 133 132 } 134 133 } 135 134 #endif ··· 136 137 #ifdef CONFIG_M68K 137 138 if (res->flags & IORESOURCE_IO) { 138 139 if ((res->start + size - 1) >= 1024) 139 - res->start = res->end; 140 + start = res->end; 140 141 } 141 142 #endif 143 + 144 + return start; 142 145 } 143 146 144 147
+13 -12
drivers/pcmcia/rsrc_nonstatic.c
··· 533 533 struct resource_map *map; 534 534 }; 535 535 536 - static void 537 - pcmcia_common_align(void *align_data, struct resource *res, 536 + static resource_size_t 537 + pcmcia_common_align(void *align_data, const struct resource *res, 538 538 resource_size_t size, resource_size_t align) 539 539 { 540 540 struct pcmcia_align_data *data = align_data; ··· 545 545 start = (res->start & ~data->mask) + data->offset; 546 546 if (start < res->start) 547 547 start += data->mask + 1; 548 - res->start = start; 548 + return start; 549 549 } 550 550 551 - static void 552 - pcmcia_align(void *align_data, struct resource *res, resource_size_t size, 553 - resource_size_t align) 551 + static resource_size_t 552 + pcmcia_align(void *align_data, const struct resource *res, 553 + resource_size_t size, resource_size_t align) 554 554 { 555 555 struct pcmcia_align_data *data = align_data; 556 556 struct resource_map *m; 557 + resource_size_t start; 557 558 558 - pcmcia_common_align(data, res, size, align); 559 + start = pcmcia_common_align(data, res, size, align); 559 560 560 561 for (m = data->map->next; m != data->map; m = m->next) { 561 562 unsigned long start = m->base; ··· 568 567 * fit here. 569 568 */ 570 569 if (res->start < start) { 571 - res->start = start; 572 - pcmcia_common_align(data, res, size, align); 570 + start = pcmcia_common_align(data, res, size, align); 573 571 } 574 572 575 573 /* ··· 586 586 * If we failed to find something suitable, ensure we fail. 587 587 */ 588 588 if (m == data->map) 589 - res->start = res->end; 589 + start = res->end; 590 + 591 + return start; 590 592 } 591 593 592 594 /* ··· 803 801 return -EINVAL; 804 802 #endif 805 803 806 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 807 - res = s->cb_dev->bus->resource[i]; 804 + pci_bus_for_each_resource(s->cb_dev->bus, res, i) { 808 805 if (!res) 809 806 continue; 810 807
+3 -2
drivers/pcmcia/yenta_socket.c
··· 649 649 static int yenta_search_res(struct yenta_socket *socket, struct resource *res, 650 650 u32 min) 651 651 { 652 + struct resource *root; 652 653 int i; 653 - for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 654 - struct resource *root = socket->dev->bus->resource[i]; 654 + 655 + pci_bus_for_each_resource(socket->dev->bus, root, i) { 655 656 if (!root) 656 657 continue; 657 658
+6
include/acpi/acpi_bus.h
··· 242 242 struct acpi_device_wakeup_flags { 243 243 u8 valid:1; /* Can successfully enable wakeup? */ 244 244 u8 run_wake:1; /* Run-Wake GPE devices */ 245 + u8 always_enabled:1; /* Run-wake devices that are always enabled */ 246 + u8 notifier_present:1; /* Wake-up notify handler has been installed */ 245 247 }; 246 248 247 249 struct acpi_device_wakeup_state { ··· 258 256 struct acpi_device_wakeup_state state; 259 257 struct acpi_device_wakeup_flags flags; 260 258 int prepare_count; 259 + int run_wake_count; 261 260 }; 262 261 263 262 /* Device */ ··· 388 385 acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int); 389 386 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); 390 387 #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle)) 388 + 389 + int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); 390 + int acpi_disable_wakeup_device_power(struct acpi_device *dev); 391 391 392 392 #ifdef CONFIG_PM_SLEEP 393 393 int acpi_pm_device_sleep_state(struct device *, int *);
+1
include/acpi/acpi_drivers.h
··· 104 104 105 105 struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain, 106 106 int bus); 107 + void pci_acpi_crs_quirks(void); 107 108 108 109 /* -------------------------------------------------------------------------- 109 110 Processor
+3 -3
include/acpi/acpixf.h
··· 281 281 /* 282 282 * GPE Interfaces 283 283 */ 284 - acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type); 284 + acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action); 285 285 286 - acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); 286 + acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type); 287 287 288 - acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number); 288 + acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type); 289 289 290 290 acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags); 291 291
+10 -18
include/acpi/actypes.h
··· 668 668 669 669 /* 670 670 * GPE info flags - Per GPE 671 - * +-+-+-+---+---+-+ 672 - * |7|6|5|4:3|2:1|0| 673 - * +-+-+-+---+---+-+ 674 - * | | | | | | 675 - * | | | | | +--- Interrupt type: Edge or Level Triggered 676 - * | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime 671 + * +-+-+-+---+-+-+-+ 672 + * |7|6|5|4:3|2|1|0| 673 + * +-+-+-+---+-+-+-+ 674 + * | | | | | | | 675 + * | | | | | | +--- Interrupt type: Edge or Level Triggered 676 + * | | | | | +--- GPE can wake the system 677 + * | | | | +--- Unused 677 678 * | | | +--- Type of dispatch -- to method, handler, or none 678 - * | | +--- Enabled for runtime? 679 - * | +--- Enabled for wake? 679 + * | | +--- Unused 680 + * | +--- Unused 680 681 * +--- Unused 681 682 */ 682 683 #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 ··· 688 687 #define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06 689 688 #define ACPI_GPE_TYPE_WAKE (u8) 0x02 690 689 #define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */ 690 + #define ACPI_GPE_CAN_WAKE (u8) 0x02 691 691 692 692 #define ACPI_GPE_DISPATCH_MASK (u8) 0x18 693 693 #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08 694 694 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x10 695 695 #define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */ 696 - 697 - #define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20 698 - #define ACPI_GPE_RUN_ENABLED (u8) 0x20 699 - #define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */ 700 - 701 - #define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40 702 - #define ACPI_GPE_WAKE_ENABLED (u8) 0x40 703 - #define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */ 704 - 705 - #define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ 706 696 707 697 /* 708 698 * Flags for GPE and Lock interfaces
+5 -2
include/linux/ioport.h
··· 112 112 113 113 extern int request_resource(struct resource *root, struct resource *new); 114 114 extern int release_resource(struct resource *new); 115 + void release_child_resources(struct resource *new); 115 116 extern void reserve_region_with_split(struct resource *root, 116 117 resource_size_t start, resource_size_t end, 117 118 const char *name); ··· 121 120 extern int allocate_resource(struct resource *root, struct resource *new, 122 121 resource_size_t size, resource_size_t min, 123 122 resource_size_t max, resource_size_t align, 124 - void (*alignf)(void *, struct resource *, 125 - resource_size_t, resource_size_t), 123 + resource_size_t (*alignf)(void *, 124 + const struct resource *, 125 + resource_size_t, 126 + resource_size_t), 126 127 void *alignf_data); 127 128 int adjust_resource(struct resource *res, resource_size_t start, 128 129 resource_size_t size);
+7
include/linux/pci-acpi.h
··· 11 11 #include <linux/acpi.h> 12 12 13 13 #ifdef CONFIG_ACPI 14 + extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, 15 + struct pci_bus *pci_bus); 16 + extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev); 17 + extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, 18 + struct pci_dev *pci_dev); 19 + extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev); 20 + 14 21 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 15 22 { 16 23 struct pci_bus *pbus = pdev->bus;
+85 -24
include/linux/pci.h
··· 187 187 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, 188 188 }; 189 189 190 + /* Based on the PCI Hotplug Spec, but some values are made up by us */ 191 + enum pci_bus_speed { 192 + PCI_SPEED_33MHz = 0x00, 193 + PCI_SPEED_66MHz = 0x01, 194 + PCI_SPEED_66MHz_PCIX = 0x02, 195 + PCI_SPEED_100MHz_PCIX = 0x03, 196 + PCI_SPEED_133MHz_PCIX = 0x04, 197 + PCI_SPEED_66MHz_PCIX_ECC = 0x05, 198 + PCI_SPEED_100MHz_PCIX_ECC = 0x06, 199 + PCI_SPEED_133MHz_PCIX_ECC = 0x07, 200 + PCI_SPEED_66MHz_PCIX_266 = 0x09, 201 + PCI_SPEED_100MHz_PCIX_266 = 0x0a, 202 + PCI_SPEED_133MHz_PCIX_266 = 0x0b, 203 + AGP_UNKNOWN = 0x0c, 204 + AGP_1X = 0x0d, 205 + AGP_2X = 0x0e, 206 + AGP_4X = 0x0f, 207 + AGP_8X = 0x10, 208 + PCI_SPEED_66MHz_PCIX_533 = 0x11, 209 + PCI_SPEED_100MHz_PCIX_533 = 0x12, 210 + PCI_SPEED_133MHz_PCIX_533 = 0x13, 211 + PCIE_SPEED_2_5GT = 0x14, 212 + PCIE_SPEED_5_0GT = 0x15, 213 + PCIE_SPEED_8_0GT = 0x16, 214 + PCI_SPEED_UNKNOWN = 0xff, 215 + }; 216 + 190 217 struct pci_cap_saved_state { 191 218 struct hlist_node next; 192 219 char cap_nr; ··· 266 239 configuration space */ 267 240 unsigned int pme_support:5; /* Bitmask of states from which PME# 268 241 can be generated */ 242 + unsigned int pme_interrupt:1; 269 243 unsigned int d1_support:1; /* Low power state D1 is supported */ 270 244 unsigned int d2_support:1; /* Low power state D2 is supported */ 271 245 unsigned int no_d1d2:1; /* Only allow D0 and D3 */ ··· 303 275 unsigned int msix_enabled:1; 304 276 unsigned int ari_enabled:1; /* ARI forwarding */ 305 277 unsigned int is_managed:1; 306 - unsigned int is_pcie:1; 278 + unsigned int is_pcie:1; /* Obsolete. Will be removed. 
279 + Use pci_is_pcie() instead */ 307 280 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 308 281 unsigned int state_saved:1; 309 282 unsigned int is_physfn:1; ··· 364 335 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); 365 336 } 366 337 367 - #ifndef PCI_BUS_NUM_RESOURCES 368 - #define PCI_BUS_NUM_RESOURCES 16 369 - #endif 338 + /* 339 + * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond 340 + * to P2P or CardBus bridge windows) go in a table. Additional ones (for 341 + * buses below host bridges or subtractive decode bridges) go in the list. 342 + * Use pci_bus_for_each_resource() to iterate through all the resources. 343 + */ 344 + 345 + /* 346 + * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly 347 + * and there's no way to program the bridge with the details of the window. 348 + * This does not apply to ACPI _CRS windows, even with the _DEC subtractive- 349 + * decode bit set, because they are explicit and can be programmed with _SRS. 
350 + */ 351 + #define PCI_SUBTRACTIVE_DECODE 0x1 352 + 353 + struct pci_bus_resource { 354 + struct list_head list; 355 + struct resource *res; 356 + unsigned int flags; 357 + }; 370 358 371 359 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ 372 360 ··· 394 348 struct list_head devices; /* list of devices on this bus */ 395 349 struct pci_dev *self; /* bridge device as seen by parent */ 396 350 struct list_head slots; /* list of slots on this bus */ 397 - struct resource *resource[PCI_BUS_NUM_RESOURCES]; 398 - /* address space routed to this bus */ 351 + struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; 352 + struct list_head resources; /* address space routed to this bus */ 399 353 400 354 struct pci_ops *ops; /* configuration access functions */ 401 355 void *sysdata; /* hook for sys-specific extension */ ··· 405 359 unsigned char primary; /* number of primary bridge */ 406 360 unsigned char secondary; /* number of secondary bridge */ 407 361 unsigned char subordinate; /* max number of subordinate buses */ 362 + unsigned char max_bus_speed; /* enum pci_bus_speed */ 363 + unsigned char cur_bus_speed; /* enum pci_bus_speed */ 408 364 409 365 char name[48]; 410 366 ··· 611 563 char *pcibios_setup(char *str); 612 564 613 565 /* Used only when drivers/pci/setup.c is used */ 614 - void pcibios_align_resource(void *, struct resource *, resource_size_t, 566 + resource_size_t pcibios_align_resource(void *, const struct resource *, 567 + resource_size_t, 615 568 resource_size_t); 616 569 void pcibios_update_irq(struct pci_dev *, int irq); 617 570 ··· 638 589 struct pci_ops *ops, void *sysdata); 639 590 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 640 591 int busnr); 592 + void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); 641 593 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 642 594 const char *name, 643 595 struct hotplug_slot *hotplug); ··· 664 
614 extern void pci_sort_breadthfirst(void); 665 615 666 616 /* Generic PCI functions exported to card drivers */ 667 - 668 - #ifdef CONFIG_PCI_LEGACY 669 - struct pci_dev __deprecated *pci_find_device(unsigned int vendor, 670 - unsigned int device, 671 - struct pci_dev *from); 672 - #endif /* CONFIG_PCI_LEGACY */ 673 617 674 618 enum pci_lost_interrupt_reason { 675 619 PCI_LOST_IRQ_NO_INFORMATION = 0, ··· 794 750 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 795 751 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); 796 752 void pci_pme_active(struct pci_dev *dev, bool enable); 797 - int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); 753 + int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, 754 + bool runtime, bool enable); 798 755 int pci_wake_from_d3(struct pci_dev *dev, bool enable); 799 756 pci_power_t pci_target_state(struct pci_dev *dev); 800 757 int pci_prepare_to_sleep(struct pci_dev *dev); 801 758 int pci_back_from_sleep(struct pci_dev *dev); 759 + bool pci_dev_run_wake(struct pci_dev *dev); 760 + 761 + static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 762 + bool enable) 763 + { 764 + return __pci_enable_wake(dev, state, false, enable); 765 + } 802 766 803 767 /* For use by arch with custom probe code */ 804 768 void set_pcie_port_type(struct pci_dev *pdev); ··· 828 776 void pci_bus_size_bridges(struct pci_bus *bus); 829 777 int pci_claim_resource(struct pci_dev *, int); 830 778 void pci_assign_unassigned_resources(void); 779 + void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); 831 780 void pdev_enable_device(struct pci_dev *); 832 781 void pdev_sort_resources(struct pci_dev *, struct resource_list *); 833 782 int pci_enable_resources(struct pci_dev *, int mask); ··· 846 793 void pci_release_selected_regions(struct pci_dev *, int); 847 794 848 795 /* drivers/pci/bus.c */ 796 + void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, 
unsigned int flags); 797 + struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); 798 + void pci_bus_remove_resources(struct pci_bus *bus); 799 + 800 + #define pci_bus_for_each_resource(bus, res, i) \ 801 + for (i = 0; \ 802 + (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ 803 + i++) 804 + 849 805 int __must_check pci_bus_alloc_resource(struct pci_bus *bus, 850 806 struct resource *res, resource_size_t size, 851 807 resource_size_t align, resource_size_t min, 852 808 unsigned int type_mask, 853 - void (*alignf)(void *, struct resource *, 854 - resource_size_t, resource_size_t), 809 + resource_size_t (*alignf)(void *, 810 + const struct resource *, 811 + resource_size_t, 812 + resource_size_t), 855 813 void *alignf_data); 856 814 void pci_enable_bridges(struct pci_bus *bus); 857 815 ··· 1040 976 _PCI_NOP(o, dword, u32 x) 1041 977 _PCI_NOP_ALL(read, *) 1042 978 _PCI_NOP_ALL(write,) 1043 - 1044 - static inline struct pci_dev *pci_find_device(unsigned int vendor, 1045 - unsigned int device, 1046 - struct pci_dev *from) 1047 - { 1048 - return NULL; 1049 - } 1050 979 1051 980 static inline struct pci_dev *pci_get_device(unsigned int vendor, 1052 981 unsigned int device, ··· 1298 1241 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1299 1242 suspend##vendor##device##hook, vendor, device, hook) 1300 1243 1301 - 1244 + #ifdef CONFIG_PCI_QUIRKS 1302 1245 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1246 + #else 1247 + static inline void pci_fixup_device(enum pci_fixup_pass pass, 1248 + struct pci_dev *dev) {} 1249 + #endif 1303 1250 1304 1251 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); 1305 1252 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
+2 -39
include/linux/pci_hotplug.h
··· 28 28 #ifndef _PCI_HOTPLUG_H 29 29 #define _PCI_HOTPLUG_H 30 30 31 - 32 - /* These values come from the PCI Hotplug Spec */ 33 - enum pci_bus_speed { 34 - PCI_SPEED_33MHz = 0x00, 35 - PCI_SPEED_66MHz = 0x01, 36 - PCI_SPEED_66MHz_PCIX = 0x02, 37 - PCI_SPEED_100MHz_PCIX = 0x03, 38 - PCI_SPEED_133MHz_PCIX = 0x04, 39 - PCI_SPEED_66MHz_PCIX_ECC = 0x05, 40 - PCI_SPEED_100MHz_PCIX_ECC = 0x06, 41 - PCI_SPEED_133MHz_PCIX_ECC = 0x07, 42 - PCI_SPEED_66MHz_PCIX_266 = 0x09, 43 - PCI_SPEED_100MHz_PCIX_266 = 0x0a, 44 - PCI_SPEED_133MHz_PCIX_266 = 0x0b, 45 - PCI_SPEED_66MHz_PCIX_533 = 0x11, 46 - PCI_SPEED_100MHz_PCIX_533 = 0x12, 47 - PCI_SPEED_133MHz_PCIX_533 = 0x13, 48 - PCI_SPEED_UNKNOWN = 0xff, 49 - }; 50 - 51 31 /* These values come from the PCI Express Spec */ 52 32 enum pcie_link_width { 53 33 PCIE_LNK_WIDTH_RESRV = 0x00, ··· 39 59 PCIE_LNK_X16 = 0x10, 40 60 PCIE_LNK_X32 = 0x20, 41 61 PCIE_LNK_WIDTH_UNKNOWN = 0xFF, 42 - }; 43 - 44 - enum pcie_link_speed { 45 - PCIE_2_5GB = 0x14, 46 - PCIE_5_0GB = 0x15, 47 - PCIE_LNK_SPEED_UNKNOWN = 0xFF, 48 62 }; 49 63 50 64 /** ··· 63 89 * @get_adapter_status: Called to get see if an adapter is present in the slot or not. 64 90 * If this field is NULL, the value passed in the struct hotplug_slot_info 65 91 * will be used when this value is requested by a user. 66 - * @get_max_bus_speed: Called to get the max bus speed for a slot. 67 - * If this field is NULL, the value passed in the struct hotplug_slot_info 68 - * will be used when this value is requested by a user. 69 - * @get_cur_bus_speed: Called to get the current bus speed for a slot. 70 - * If this field is NULL, the value passed in the struct hotplug_slot_info 71 - * will be used when this value is requested by a user. 72 92 * 73 93 * The table of function pointers that is passed to the hotplug pci core by a 74 94 * hotplug pci driver. 
These functions are called by the hotplug pci core when ··· 80 112 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); 81 113 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); 82 114 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); 83 - int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); 84 - int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); 85 115 }; 86 116 87 117 /** 88 118 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot 89 - * @power: if power is enabled or not (1/0) 119 + * @power_status: if power is enabled or not (1/0) 90 120 * @attention_status: if the attention light is enabled or not (1/0) 91 121 * @latch_status: if the latch (if any) is open or closed (1/0) 92 - * @adapter_present: if there is a pci board present in the slot or not (1/0) 93 - * @address: (domain << 16 | bus << 8 | dev) 122 + * @adapter_status: if there is a pci board present in the slot or not (1/0) 94 123 * 95 124 * Used to notify the hotplug pci core of the status of a specific slot. 96 125 */ ··· 96 131 u8 attention_status; 97 132 u8 latch_status; 98 133 u8 adapter_status; 99 - enum pci_bus_speed max_bus_speed; 100 - enum pci_bus_speed cur_bus_speed; 101 134 }; 102 135 103 136 /**
+3
include/linux/pci_ids.h
··· 2417 2417 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 2418 2418 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 2419 2419 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 2420 + #define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22 2421 + #define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42 2422 + #define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43 2420 2423 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410 2421 2424 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411 2422 2425 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
+5
kernel/power/Kconfig
··· 222 222 and the bus type drivers of the buses the devices are on are 223 223 responsible for the actual handling of the autosuspend requests and 224 224 wake-up events. 225 + 226 + config PM_OPS 227 + bool 228 + depends on PM_SLEEP || PM_RUNTIME 229 + default y
+39 -5
kernel/resource.c
··· 188 188 return -EINVAL; 189 189 } 190 190 191 + static void __release_child_resources(struct resource *r) 192 + { 193 + struct resource *tmp, *p; 194 + resource_size_t size; 195 + 196 + p = r->child; 197 + r->child = NULL; 198 + while (p) { 199 + tmp = p; 200 + p = p->sibling; 201 + 202 + tmp->parent = NULL; 203 + tmp->sibling = NULL; 204 + __release_child_resources(tmp); 205 + 206 + printk(KERN_DEBUG "release child resource %pR\n", tmp); 207 + /* need to restore size, and keep flags */ 208 + size = resource_size(tmp); 209 + tmp->start = 0; 210 + tmp->end = size - 1; 211 + } 212 + } 213 + 214 + void release_child_resources(struct resource *r) 215 + { 216 + write_lock(&resource_lock); 217 + __release_child_resources(r); 218 + write_unlock(&resource_lock); 219 + } 220 + 191 221 /** 192 222 * request_resource - request and reserve an I/O or memory resource 193 223 * @root: root resource descriptor ··· 333 303 static int find_resource(struct resource *root, struct resource *new, 334 304 resource_size_t size, resource_size_t min, 335 305 resource_size_t max, resource_size_t align, 336 - void (*alignf)(void *, struct resource *, 337 - resource_size_t, resource_size_t), 306 + resource_size_t (*alignf)(void *, 307 + const struct resource *, 308 + resource_size_t, 309 + resource_size_t), 338 310 void *alignf_data) 339 311 { 340 312 struct resource *this = root->child; ··· 362 330 tmp.end = max; 363 331 tmp.start = ALIGN(tmp.start, align); 364 332 if (alignf) 365 - alignf(alignf_data, &tmp, size, align); 333 + tmp.start = alignf(alignf_data, &tmp, size, align); 366 334 if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) { 367 335 new->start = tmp.start; 368 336 new->end = tmp.start + size - 1; ··· 390 358 int allocate_resource(struct resource *root, struct resource *new, 391 359 resource_size_t size, resource_size_t min, 392 360 resource_size_t max, resource_size_t align, 393 - void (*alignf)(void *, struct resource *, 394 - resource_size_t, resource_size_t), 361 
+ resource_size_t (*alignf)(void *, 362 + const struct resource *, 363 + resource_size_t, 364 + resource_size_t), 395 365 void *alignf_data) 396 366 { 397 367 int err;