Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next-rebase' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci

* 'next-rebase' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci:
PCI: Clean-up MPS debug output
pci: Clamp pcie_set_readrq() when using "performance" settings
PCI: enable MPS "performance" setting to properly handle bridge MPS
PCI: Workaround for Intel MPS errata
PCI: Add support for PASID capability
PCI: Add implementation for PRI capability
PCI: Export ATS functions to modules
PCI: Move ATS implementation into own file
PCI / PM: Remove unnecessary error variable from acpi_dev_run_wake()
PCI hotplug: acpiphp: Prevent deadlock on PCI-to-PCI bridge remove
PCI / PM: Extend PME polling to all PCI devices
PCI quirk: mmc: Always check for lower base frequency quirk for Ricoh 1180:e823
PCI: Make pci_setup_bridge() non-static for use by arch code
x86: constify PCI raw ops structures
PCI: Add quirk for known incorrect MPSS
PCI: Add Solarflare vendor ID and SFC4000 device IDs

+868 -260
+3 -3
arch/x86/include/asm/pci_x86.h
··· 99 99 int reg, int len, u32 val); 100 100 }; 101 101 102 - extern struct pci_raw_ops *raw_pci_ops; 103 - extern struct pci_raw_ops *raw_pci_ext_ops; 102 + extern const struct pci_raw_ops *raw_pci_ops; 103 + extern const struct pci_raw_ops *raw_pci_ext_ops; 104 104 105 - extern struct pci_raw_ops pci_direct_conf1; 105 + extern const struct pci_raw_ops pci_direct_conf1; 106 106 extern bool port_cf9_safe; 107 107 108 108 /* arch_initcall level */
+1 -1
arch/x86/pci/ce4100.c
··· 304 304 return pci_direct_conf1.write(seg, bus, devfn, reg, len, value); 305 305 } 306 306 307 - struct pci_raw_ops ce4100_pci_conf = { 307 + static const struct pci_raw_ops ce4100_pci_conf = { 308 308 .read = ce4100_conf_read, 309 309 .write = ce4100_conf_write, 310 310 };
+2 -2
arch/x86/pci/common.c
··· 33 33 int pcibios_last_bus = -1; 34 34 unsigned long pirq_table_addr; 35 35 struct pci_bus *pci_root_bus; 36 - struct pci_raw_ops *raw_pci_ops; 37 - struct pci_raw_ops *raw_pci_ext_ops; 36 + const struct pci_raw_ops *__read_mostly raw_pci_ops; 37 + const struct pci_raw_ops *__read_mostly raw_pci_ext_ops; 38 38 39 39 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, 40 40 int reg, int len, u32 *val)
+3 -3
arch/x86/pci/direct.c
··· 79 79 80 80 #undef PCI_CONF1_ADDRESS 81 81 82 - struct pci_raw_ops pci_direct_conf1 = { 82 + const struct pci_raw_ops pci_direct_conf1 = { 83 83 .read = pci_conf1_read, 84 84 .write = pci_conf1_write, 85 85 }; ··· 175 175 176 176 #undef PCI_CONF2_ADDRESS 177 177 178 - struct pci_raw_ops pci_direct_conf2 = { 178 + static const struct pci_raw_ops pci_direct_conf2 = { 179 179 .read = pci_conf2_read, 180 180 .write = pci_conf2_write, 181 181 }; ··· 191 191 * This should be close to trivial, but it isn't, because there are buggy 192 192 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. 193 193 */ 194 - static int __init pci_sanity_check(struct pci_raw_ops *o) 194 + static int __init pci_sanity_check(const struct pci_raw_ops *o) 195 195 { 196 196 u32 x = 0; 197 197 int year, devfn;
+1 -1
arch/x86/pci/mmconfig_32.c
··· 117 117 return 0; 118 118 } 119 119 120 - static struct pci_raw_ops pci_mmcfg = { 120 + static const struct pci_raw_ops pci_mmcfg = { 121 121 .read = pci_mmcfg_read, 122 122 .write = pci_mmcfg_write, 123 123 };
+1 -1
arch/x86/pci/mmconfig_64.c
··· 81 81 return 0; 82 82 } 83 83 84 - static struct pci_raw_ops pci_mmcfg = { 84 + static const struct pci_raw_ops pci_mmcfg = { 85 85 .read = pci_mmcfg_read, 86 86 .write = pci_mmcfg_write, 87 87 };
+1 -1
arch/x86/pci/numaq_32.c
··· 110 110 111 111 #undef PCI_CONF1_MQ_ADDRESS 112 112 113 - static struct pci_raw_ops pci_direct_conf1_mq = { 113 + static const struct pci_raw_ops pci_direct_conf1_mq = { 114 114 .read = pci_conf1_mq_read, 115 115 .write = pci_conf1_mq_write 116 116 };
+1 -1
arch/x86/pci/olpc.c
··· 301 301 return 0; 302 302 } 303 303 304 - static struct pci_raw_ops pci_olpc_conf = { 304 + static const struct pci_raw_ops pci_olpc_conf = { 305 305 .read = pci_olpc_read, 306 306 .write = pci_olpc_write, 307 307 };
+2 -2
arch/x86/pci/pcbios.c
··· 303 303 * Function table for BIOS32 access 304 304 */ 305 305 306 - static struct pci_raw_ops pci_bios_access = { 306 + static const struct pci_raw_ops pci_bios_access = { 307 307 .read = pci_bios_read, 308 308 .write = pci_bios_write 309 309 }; ··· 312 312 * Try to find PCI BIOS. 313 313 */ 314 314 315 - static struct pci_raw_ops * __devinit pci_find_bios(void) 315 + static const struct pci_raw_ops * __devinit pci_find_bios(void) 316 316 { 317 317 union bios32 *check; 318 318 unsigned char sum;
+2 -1
drivers/acpi/osl.c
··· 80 80 static void *acpi_irq_context; 81 81 static struct workqueue_struct *kacpid_wq; 82 82 static struct workqueue_struct *kacpi_notify_wq; 83 - static struct workqueue_struct *kacpi_hotplug_wq; 83 + struct workqueue_struct *kacpi_hotplug_wq; 84 + EXPORT_SYMBOL(kacpi_hotplug_wq); 84 85 85 86 struct acpi_res_list { 86 87 resource_size_t start;
+6 -4
drivers/net/ethernet/sfc/efx.c
··· 2229 2229 2230 2230 /* PCI device ID table */ 2231 2231 static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { 2232 - {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 2232 + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2233 + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), 2233 2234 .driver_data = (unsigned long) &falcon_a1_nic_type}, 2234 - {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 2235 + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2236 + PCI_DEVICE_ID_SOLARFLARE_SFC4000B), 2235 2237 .driver_data = (unsigned long) &falcon_b0_nic_type}, 2236 - {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), 2238 + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID), 2237 2239 .driver_data = (unsigned long) &siena_a0_nic_type}, 2238 - {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), 2240 + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID), 2239 2241 .driver_data = (unsigned long) &siena_a0_nic_type}, 2240 2242 {0} /* end of list */ 2241 2243 };
-4
drivers/net/ethernet/sfc/efx.h
··· 15 15 #include "filter.h" 16 16 17 17 /* PCI IDs */ 18 - #define EFX_VENDID_SFC 0x1924 19 - #define FALCON_A_P_DEVID 0x0703 20 - #define FALCON_A_S_DEVID 0x6703 21 - #define FALCON_B_P_DEVID 0x0710 22 18 #define BETHPAGE_A_P_DEVID 0x0803 23 19 #define SIENA_A_P_DEVID 0x0813 24 20
+2 -1
drivers/net/ethernet/sfc/falcon.c
··· 1426 1426 } 1427 1427 1428 1428 dev = pci_dev_get(efx->pci_dev); 1429 - while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, 1429 + while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE, 1430 + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, 1430 1431 dev))) { 1431 1432 if (dev->bus == efx->pci_dev->bus && 1432 1433 dev->devfn == efx->pci_dev->devfn + 1) {
+2 -1
drivers/net/ethernet/sfc/falcon_boards.c
··· 764 764 765 765 if (board->type) { 766 766 netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n", 767 - (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 767 + (efx->pci_dev->subsystem_vendor == 768 + PCI_VENDOR_ID_SOLARFLARE) 768 769 ? board->type->ref_model : board->type->gen_type, 769 770 'A' + board->major, board->minor); 770 771 return 0;
+26
drivers/pci/Kconfig
··· 71 71 72 72 If unsure say Y. 73 73 74 + config PCI_ATS 75 + bool 76 + 74 77 config PCI_IOV 75 78 bool "PCI IOV support" 76 79 depends on PCI 80 + select PCI_ATS 77 81 help 78 82 I/O Virtualization is a PCI feature supported by some devices 79 83 which allows them to create virtual devices which share their 80 84 physical resources. 85 + 86 + If unsure, say N. 87 + 88 + config PCI_PRI 89 + bool "PCI PRI support" 90 + select PCI_ATS 91 + help 92 + PRI is the PCI Page Request Interface. It allows PCI devices that are 93 + behind an IOMMU to recover from page faults. 94 + 95 + If unsure, say N. 96 + 97 + config PCI_PASID 98 + bool "PCI PASID support" 99 + depends on PCI 100 + select PCI_ATS 101 + help 102 + Process Address Space Identifiers (PASIDs) can be used by PCI devices 103 + to access more than one IO address space at the same time. To make 104 + use of this feature an IOMMU is required which also supports PASIDs. 105 + Select this option if you have such an IOMMU and want to compile the 106 + driver for it into your kernel. 81 107 82 108 If unsure, say N. 83 109
+1
drivers/pci/Makefile
··· 29 29 # Build the Hypertransport interrupt support 30 30 obj-$(CONFIG_HT_IRQ) += htirq.o 31 31 32 + obj-$(CONFIG_PCI_ATS) += ats.o 32 33 obj-$(CONFIG_PCI_IOV) += iov.o 33 34 34 35 #
+438
drivers/pci/ats.c
··· 1 + /* 2 + * drivers/pci/ats.c 3 + * 4 + * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com> 5 + * Copyright (C) 2011 Advanced Micro Devices, 6 + * 7 + * PCI Express I/O Virtualization (IOV) support. 8 + * Address Translation Service 1.0 9 + * Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com> 10 + * PASID support added by Joerg Roedel <joerg.roedel@amd.com> 11 + */ 12 + 13 + #include <linux/pci-ats.h> 14 + #include <linux/pci.h> 15 + 16 + #include "pci.h" 17 + 18 + static int ats_alloc_one(struct pci_dev *dev, int ps) 19 + { 20 + int pos; 21 + u16 cap; 22 + struct pci_ats *ats; 23 + 24 + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); 25 + if (!pos) 26 + return -ENODEV; 27 + 28 + ats = kzalloc(sizeof(*ats), GFP_KERNEL); 29 + if (!ats) 30 + return -ENOMEM; 31 + 32 + ats->pos = pos; 33 + ats->stu = ps; 34 + pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); 35 + ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : 36 + PCI_ATS_MAX_QDEP; 37 + dev->ats = ats; 38 + 39 + return 0; 40 + } 41 + 42 + static void ats_free_one(struct pci_dev *dev) 43 + { 44 + kfree(dev->ats); 45 + dev->ats = NULL; 46 + } 47 + 48 + /** 49 + * pci_enable_ats - enable the ATS capability 50 + * @dev: the PCI device 51 + * @ps: the IOMMU page shift 52 + * 53 + * Returns 0 on success, or negative on failure. 54 + */ 55 + int pci_enable_ats(struct pci_dev *dev, int ps) 56 + { 57 + int rc; 58 + u16 ctrl; 59 + 60 + BUG_ON(dev->ats && dev->ats->is_enabled); 61 + 62 + if (ps < PCI_ATS_MIN_STU) 63 + return -EINVAL; 64 + 65 + if (dev->is_physfn || dev->is_virtfn) { 66 + struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; 67 + 68 + mutex_lock(&pdev->sriov->lock); 69 + if (pdev->ats) 70 + rc = pdev->ats->stu == ps ? 
0 : -EINVAL; 71 + else 72 + rc = ats_alloc_one(pdev, ps); 73 + 74 + if (!rc) 75 + pdev->ats->ref_cnt++; 76 + mutex_unlock(&pdev->sriov->lock); 77 + if (rc) 78 + return rc; 79 + } 80 + 81 + if (!dev->is_physfn) { 82 + rc = ats_alloc_one(dev, ps); 83 + if (rc) 84 + return rc; 85 + } 86 + 87 + ctrl = PCI_ATS_CTRL_ENABLE; 88 + if (!dev->is_virtfn) 89 + ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU); 90 + pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); 91 + 92 + dev->ats->is_enabled = 1; 93 + 94 + return 0; 95 + } 96 + EXPORT_SYMBOL_GPL(pci_enable_ats); 97 + 98 + /** 99 + * pci_disable_ats - disable the ATS capability 100 + * @dev: the PCI device 101 + */ 102 + void pci_disable_ats(struct pci_dev *dev) 103 + { 104 + u16 ctrl; 105 + 106 + BUG_ON(!dev->ats || !dev->ats->is_enabled); 107 + 108 + pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl); 109 + ctrl &= ~PCI_ATS_CTRL_ENABLE; 110 + pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); 111 + 112 + dev->ats->is_enabled = 0; 113 + 114 + if (dev->is_physfn || dev->is_virtfn) { 115 + struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; 116 + 117 + mutex_lock(&pdev->sriov->lock); 118 + pdev->ats->ref_cnt--; 119 + if (!pdev->ats->ref_cnt) 120 + ats_free_one(pdev); 121 + mutex_unlock(&pdev->sriov->lock); 122 + } 123 + 124 + if (!dev->is_physfn) 125 + ats_free_one(dev); 126 + } 127 + EXPORT_SYMBOL_GPL(pci_disable_ats); 128 + 129 + /** 130 + * pci_ats_queue_depth - query the ATS Invalidate Queue Depth 131 + * @dev: the PCI device 132 + * 133 + * Returns the queue depth on success, or negative on failure. 134 + * 135 + * The ATS spec uses 0 in the Invalidate Queue Depth field to 136 + * indicate that the function can accept 32 Invalidate Request. 137 + * But here we use the `real' values (i.e. 1~32) for the Queue 138 + * Depth; and 0 indicates the function shares the Queue with 139 + * other functions (doesn't exclusively own a Queue). 
140 + */ 141 + int pci_ats_queue_depth(struct pci_dev *dev) 142 + { 143 + int pos; 144 + u16 cap; 145 + 146 + if (dev->is_virtfn) 147 + return 0; 148 + 149 + if (dev->ats) 150 + return dev->ats->qdep; 151 + 152 + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); 153 + if (!pos) 154 + return -ENODEV; 155 + 156 + pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); 157 + 158 + return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : 159 + PCI_ATS_MAX_QDEP; 160 + } 161 + EXPORT_SYMBOL_GPL(pci_ats_queue_depth); 162 + 163 + #ifdef CONFIG_PCI_PRI 164 + /** 165 + * pci_enable_pri - Enable PRI capability 166 + * @pdev: PCI device structure 167 + * 168 + * Returns 0 on success, negative value on error 169 + */ 170 + int pci_enable_pri(struct pci_dev *pdev, u32 reqs) 171 + { 172 + u16 control, status; 173 + u32 max_requests; 174 + int pos; 175 + 176 + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); 177 + if (!pos) 178 + return -EINVAL; 179 + 180 + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); 181 + pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); 182 + if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED)) 183 + return -EBUSY; 184 + 185 + pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests); 186 + reqs = min(max_requests, reqs); 187 + pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs); 188 + 189 + control |= PCI_PRI_ENABLE; 190 + pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); 191 + 192 + return 0; 193 + } 194 + EXPORT_SYMBOL_GPL(pci_enable_pri); 195 + 196 + /** 197 + * pci_disable_pri - Disable PRI capability 198 + * @pdev: PCI device structure 199 + * 200 + * Only clears the enabled-bit, regardless of its former value 201 + */ 202 + void pci_disable_pri(struct pci_dev *pdev) 203 + { 204 + u16 control; 205 + int pos; 206 + 207 + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); 208 + if (!pos) 209 + return; 210 + 211 + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, 
&control); 212 + control &= ~PCI_PRI_ENABLE; 213 + pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); 214 + } 215 + EXPORT_SYMBOL_GPL(pci_disable_pri); 216 + 217 + /** 218 + * pci_pri_enabled - Checks if PRI capability is enabled 219 + * @pdev: PCI device structure 220 + * 221 + * Returns true if PRI is enabled on the device, false otherwise 222 + */ 223 + bool pci_pri_enabled(struct pci_dev *pdev) 224 + { 225 + u16 control; 226 + int pos; 227 + 228 + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); 229 + if (!pos) 230 + return false; 231 + 232 + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); 233 + 234 + return (control & PCI_PRI_ENABLE) ? true : false; 235 + } 236 + EXPORT_SYMBOL_GPL(pci_pri_enabled); 237 + 238 + /** 239 + * pci_reset_pri - Resets device's PRI state 240 + * @pdev: PCI device structure 241 + * 242 + * The PRI capability must be disabled before this function is called. 243 + * Returns 0 on success, negative value on error. 244 + */ 245 + int pci_reset_pri(struct pci_dev *pdev) 246 + { 247 + u16 control; 248 + int pos; 249 + 250 + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); 251 + if (!pos) 252 + return -EINVAL; 253 + 254 + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); 255 + if (control & PCI_PRI_ENABLE) 256 + return -EBUSY; 257 + 258 + control |= PCI_PRI_RESET; 259 + 260 + pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); 261 + 262 + return 0; 263 + } 264 + EXPORT_SYMBOL_GPL(pci_reset_pri); 265 + 266 + /** 267 + * pci_pri_stopped - Checks whether the PRI capability is stopped 268 + * @pdev: PCI device structure 269 + * 270 + * Returns true if the PRI capability on the device is disabled and the 271 + * device has no outstanding PRI requests, false otherwise. The device 272 + * indicates this via the STOPPED bit in the status register of the 273 + * capability. 274 + * The device internal state can be cleared by resetting the PRI state 275 + * with pci_reset_pri(). 
This can force the capability into the STOPPED 276 + * state. 277 + */ 278 + bool pci_pri_stopped(struct pci_dev *pdev) 279 + { 280 + u16 control, status; 281 + int pos; 282 + 283 + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); 284 + if (!pos) 285 + return true; 286 + 287 + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); 288 + pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); 289 + 290 + if (control & PCI_PRI_ENABLE) 291 + return false; 292 + 293 + return (status & PCI_PRI_STATUS_STOPPED) ? true : false; 294 + } 295 + EXPORT_SYMBOL_GPL(pci_pri_stopped); 296 + 297 + /** 298 + * pci_pri_status - Request PRI status of a device 299 + * @pdev: PCI device structure 300 + * 301 + * Returns negative value on failure, status on success. The status can 302 + * be checked against status-bits. Supported bits are currently: 303 + * PCI_PRI_STATUS_RF: Response failure 304 + * PCI_PRI_STATUS_UPRGI: Unexpected Page Request Group Index 305 + * PCI_PRI_STATUS_STOPPED: PRI has stopped 306 + */ 307 + int pci_pri_status(struct pci_dev *pdev) 308 + { 309 + u16 status, control; 310 + int pos; 311 + 312 + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); 313 + if (!pos) 314 + return -EINVAL; 315 + 316 + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); 317 + pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status); 318 + 319 + /* Stopped bit is undefined when enable == 1, so clear it */ 320 + if (control & PCI_PRI_ENABLE) 321 + status &= ~PCI_PRI_STATUS_STOPPED; 322 + 323 + return status; 324 + } 325 + EXPORT_SYMBOL_GPL(pci_pri_status); 326 + #endif /* CONFIG_PCI_PRI */ 327 + 328 + #ifdef CONFIG_PCI_PASID 329 + /** 330 + * pci_enable_pasid - Enable the PASID capability 331 + * @pdev: PCI device structure 332 + * @features: Features to enable 333 + * 334 + * Returns 0 on success, negative value on error. This function checks 335 + * whether the features are actually supported by the device and returns 336 + * an error if not. 
337 + */ 338 + int pci_enable_pasid(struct pci_dev *pdev, int features) 339 + { 340 + u16 control, supported; 341 + int pos; 342 + 343 + pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); 344 + if (!pos) 345 + return -EINVAL; 346 + 347 + pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control); 348 + pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); 349 + 350 + if (!(supported & PCI_PASID_ENABLE)) 351 + return -EINVAL; 352 + 353 + supported &= PCI_PASID_EXEC | PCI_PASID_PRIV; 354 + 355 + /* User wants to enable anything unsupported? */ 356 + if ((supported & features) != features) 357 + return -EINVAL; 358 + 359 + control = PCI_PASID_ENABLE | features; 360 + 361 + pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control); 362 + 363 + return 0; 364 + } 365 + EXPORT_SYMBOL_GPL(pci_enable_pasid); 366 + 367 + /** 368 + * pci_disable_pasid - Disable the PASID capability 369 + * @pdev: PCI device structure 370 + * 371 + */ 372 + void pci_disable_pasid(struct pci_dev *pdev) 373 + { 374 + u16 control = 0; 375 + int pos; 376 + 377 + pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); 378 + if (!pos) 379 + return; 380 + 381 + pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control); 382 + } 383 + EXPORT_SYMBOL_GPL(pci_disable_pasid); 384 + 385 + /** 386 + * pci_pasid_features - Check which PASID features are supported 387 + * @pdev: PCI device structure 388 + * 389 + * Returns a negative value when no PASID capability is present. 390 + * Otherwise it returns a bitmask with supported features. 
Current 391 + * features reported are: 392 + * PCI_PASID_ENABLE - PASID capability can be enabled 393 + * PCI_PASID_EXEC - Execute permission supported 394 + * PCI_PASID_PRIV - Privileged mode supported 395 + */ 396 + int pci_pasid_features(struct pci_dev *pdev) 397 + { 398 + u16 supported; 399 + int pos; 400 + 401 + pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); 402 + if (!pos) 403 + return -EINVAL; 404 + 405 + pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); 406 + 407 + supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV; 408 + 409 + return supported; 410 + } 411 + EXPORT_SYMBOL_GPL(pci_pasid_features); 412 + 413 + #define PASID_NUMBER_SHIFT 8 414 + #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) 415 + /** 416 + * pci_max_pasids - Get maximum number of PASIDs supported by device 417 + * @pdev: PCI device structure 418 + * 419 + * Returns negative value when PASID capability is not present. 420 + * Otherwise it returns the number of supported PASIDs. 421 + */ 422 + int pci_max_pasids(struct pci_dev *pdev) 423 + { 424 + u16 supported; 425 + int pos; 426 + 427 + pos = pci_find_ext_capability(pdev, PCI_PASID_CAP); 428 + if (!pos) 429 + return -EINVAL; 430 + 431 + pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported); 432 + 433 + supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT; 434 + 435 + return (1 << supported); 436 + } 437 + EXPORT_SYMBOL_GPL(pci_max_pasids); 438 + #endif /* CONFIG_PCI_PASID */
+94 -15
drivers/pci/hotplug/acpiphp_glue.c
··· 48 48 #include <linux/pci-acpi.h> 49 49 #include <linux/mutex.h> 50 50 #include <linux/slab.h> 51 + #include <linux/acpi.h> 51 52 52 53 #include "../pci.h" 53 54 #include "acpiphp.h" ··· 1150 1149 return AE_OK ; 1151 1150 } 1152 1151 1153 - /** 1154 - * handle_hotplug_event_bridge - handle ACPI event on bridges 1155 - * @handle: Notify()'ed acpi_handle 1156 - * @type: Notify code 1157 - * @context: pointer to acpiphp_bridge structure 1158 - * 1159 - * Handles ACPI event notification on {host,p2p} bridges. 1160 - */ 1161 - static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *context) 1152 + struct acpiphp_hp_work { 1153 + struct work_struct work; 1154 + acpi_handle handle; 1155 + u32 type; 1156 + void *context; 1157 + }; 1158 + 1159 + static void alloc_acpiphp_hp_work(acpi_handle handle, u32 type, 1160 + void *context, 1161 + void (*func)(struct work_struct *work)) 1162 + { 1163 + struct acpiphp_hp_work *hp_work; 1164 + int ret; 1165 + 1166 + hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL); 1167 + if (!hp_work) 1168 + return; 1169 + 1170 + hp_work->handle = handle; 1171 + hp_work->type = type; 1172 + hp_work->context = context; 1173 + 1174 + INIT_WORK(&hp_work->work, func); 1175 + ret = queue_work(kacpi_hotplug_wq, &hp_work->work); 1176 + if (!ret) 1177 + kfree(hp_work); 1178 + } 1179 + 1180 + static void _handle_hotplug_event_bridge(struct work_struct *work) 1162 1181 { 1163 1182 struct acpiphp_bridge *bridge; 1164 1183 char objname[64]; ··· 1186 1165 .pointer = objname }; 1187 1166 struct acpi_device *device; 1188 1167 int num_sub_bridges = 0; 1168 + struct acpiphp_hp_work *hp_work; 1169 + acpi_handle handle; 1170 + u32 type; 1171 + 1172 + hp_work = container_of(work, struct acpiphp_hp_work, work); 1173 + handle = hp_work->handle; 1174 + type = hp_work->type; 1189 1175 1190 1176 if (acpi_bus_get_device(handle, &device)) { 1191 1177 /* This bridge must have just been physically inserted */ 1192 1178 handle_bridge_insertion(handle, type); 
1193 - return; 1179 + goto out; 1194 1180 } 1195 1181 1196 1182 bridge = acpiphp_handle_to_bridge(handle); ··· 1208 1180 1209 1181 if (!bridge && !num_sub_bridges) { 1210 1182 err("cannot get bridge info\n"); 1211 - return; 1183 + goto out; 1212 1184 } 1213 1185 1214 1186 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); ··· 1269 1241 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); 1270 1242 break; 1271 1243 } 1244 + 1245 + out: 1246 + kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ 1272 1247 } 1273 1248 1274 1249 /** 1275 - * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) 1250 + * handle_hotplug_event_bridge - handle ACPI event on bridges 1276 1251 * @handle: Notify()'ed acpi_handle 1277 1252 * @type: Notify code 1278 - * @context: pointer to acpiphp_func structure 1253 + * @context: pointer to acpiphp_bridge structure 1279 1254 * 1280 - * Handles ACPI event notification on slots. 1255 + * Handles ACPI event notification on {host,p2p} bridges. 1281 1256 */ 1282 - static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context) 1257 + static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, 1258 + void *context) 1259 + { 1260 + /* 1261 + * Currently the code adds all hotplug events to the kacpid_wq 1262 + * queue when it should add hotplug events to the kacpi_hotplug_wq. 1263 + * The proper way to fix this is to reorganize the code so that 1264 + * drivers (dock, etc.) do not call acpi_os_execute(), etc. 1265 + * For now just re-add this work to the kacpi_hotplug_wq so we 1266 + * don't deadlock on hotplug actions. 
1267 + */ 1268 + alloc_acpiphp_hp_work(handle, type, context, 1269 + _handle_hotplug_event_bridge); 1270 + } 1271 + 1272 + static void _handle_hotplug_event_func(struct work_struct *work) 1283 1273 { 1284 1274 struct acpiphp_func *func; 1285 1275 char objname[64]; 1286 1276 struct acpi_buffer buffer = { .length = sizeof(objname), 1287 1277 .pointer = objname }; 1278 + struct acpiphp_hp_work *hp_work; 1279 + acpi_handle handle; 1280 + u32 type; 1281 + void *context; 1282 + 1283 + hp_work = container_of(work, struct acpiphp_hp_work, work); 1284 + handle = hp_work->handle; 1285 + type = hp_work->type; 1286 + context = hp_work->context; 1288 1287 1289 1288 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 1290 1289 ··· 1346 1291 warn("notify_handler: unknown event type 0x%x for %s\n", type, objname); 1347 1292 break; 1348 1293 } 1294 + 1295 + kfree(hp_work); /* allocated in handle_hotplug_event_func */ 1349 1296 } 1350 1297 1298 + /** 1299 + * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) 1300 + * @handle: Notify()'ed acpi_handle 1301 + * @type: Notify code 1302 + * @context: pointer to acpiphp_func structure 1303 + * 1304 + * Handles ACPI event notification on slots. 1305 + */ 1306 + static void handle_hotplug_event_func(acpi_handle handle, u32 type, 1307 + void *context) 1308 + { 1309 + /* 1310 + * Currently the code adds all hotplug events to the kacpid_wq 1311 + * queue when it should add hotplug events to the kacpi_hotplug_wq. 1312 + * The proper way to fix this is to reorganize the code so that 1313 + * drivers (dock, etc.) do not call acpi_os_execute(), etc. 1314 + * For now just re-add this work to the kacpi_hotplug_wq so we 1315 + * don't deadlock on hotplug actions. 1316 + */ 1317 + alloc_acpiphp_hp_work(handle, type, context, 1318 + _handle_hotplug_event_func); 1319 + } 1351 1320 1352 1321 static acpi_status 1353 1322 find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-142
drivers/pci/iov.c
··· 722 722 return dev->sriov->nr_virtfn; 723 723 } 724 724 EXPORT_SYMBOL_GPL(pci_num_vf); 725 - 726 - static int ats_alloc_one(struct pci_dev *dev, int ps) 727 - { 728 - int pos; 729 - u16 cap; 730 - struct pci_ats *ats; 731 - 732 - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); 733 - if (!pos) 734 - return -ENODEV; 735 - 736 - ats = kzalloc(sizeof(*ats), GFP_KERNEL); 737 - if (!ats) 738 - return -ENOMEM; 739 - 740 - ats->pos = pos; 741 - ats->stu = ps; 742 - pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); 743 - ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : 744 - PCI_ATS_MAX_QDEP; 745 - dev->ats = ats; 746 - 747 - return 0; 748 - } 749 - 750 - static void ats_free_one(struct pci_dev *dev) 751 - { 752 - kfree(dev->ats); 753 - dev->ats = NULL; 754 - } 755 - 756 - /** 757 - * pci_enable_ats - enable the ATS capability 758 - * @dev: the PCI device 759 - * @ps: the IOMMU page shift 760 - * 761 - * Returns 0 on success, or negative on failure. 762 - */ 763 - int pci_enable_ats(struct pci_dev *dev, int ps) 764 - { 765 - int rc; 766 - u16 ctrl; 767 - 768 - BUG_ON(dev->ats && dev->ats->is_enabled); 769 - 770 - if (ps < PCI_ATS_MIN_STU) 771 - return -EINVAL; 772 - 773 - if (dev->is_physfn || dev->is_virtfn) { 774 - struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; 775 - 776 - mutex_lock(&pdev->sriov->lock); 777 - if (pdev->ats) 778 - rc = pdev->ats->stu == ps ? 
0 : -EINVAL; 779 - else 780 - rc = ats_alloc_one(pdev, ps); 781 - 782 - if (!rc) 783 - pdev->ats->ref_cnt++; 784 - mutex_unlock(&pdev->sriov->lock); 785 - if (rc) 786 - return rc; 787 - } 788 - 789 - if (!dev->is_physfn) { 790 - rc = ats_alloc_one(dev, ps); 791 - if (rc) 792 - return rc; 793 - } 794 - 795 - ctrl = PCI_ATS_CTRL_ENABLE; 796 - if (!dev->is_virtfn) 797 - ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU); 798 - pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); 799 - 800 - dev->ats->is_enabled = 1; 801 - 802 - return 0; 803 - } 804 - 805 - /** 806 - * pci_disable_ats - disable the ATS capability 807 - * @dev: the PCI device 808 - */ 809 - void pci_disable_ats(struct pci_dev *dev) 810 - { 811 - u16 ctrl; 812 - 813 - BUG_ON(!dev->ats || !dev->ats->is_enabled); 814 - 815 - pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl); 816 - ctrl &= ~PCI_ATS_CTRL_ENABLE; 817 - pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl); 818 - 819 - dev->ats->is_enabled = 0; 820 - 821 - if (dev->is_physfn || dev->is_virtfn) { 822 - struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn; 823 - 824 - mutex_lock(&pdev->sriov->lock); 825 - pdev->ats->ref_cnt--; 826 - if (!pdev->ats->ref_cnt) 827 - ats_free_one(pdev); 828 - mutex_unlock(&pdev->sriov->lock); 829 - } 830 - 831 - if (!dev->is_physfn) 832 - ats_free_one(dev); 833 - } 834 - 835 - /** 836 - * pci_ats_queue_depth - query the ATS Invalidate Queue Depth 837 - * @dev: the PCI device 838 - * 839 - * Returns the queue depth on success, or negative on failure. 840 - * 841 - * The ATS spec uses 0 in the Invalidate Queue Depth field to 842 - * indicate that the function can accept 32 Invalidate Request. 843 - * But here we use the `real' values (i.e. 1~32) for the Queue 844 - * Depth; and 0 indicates the function shares the Queue with 845 - * other functions (doesn't exclusively own a Queue). 
846 - */ 847 - int pci_ats_queue_depth(struct pci_dev *dev) 848 - { 849 - int pos; 850 - u16 cap; 851 - 852 - if (dev->is_virtfn) 853 - return 0; 854 - 855 - if (dev->ats) 856 - return dev->ats->qdep; 857 - 858 - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); 859 - if (!pos) 860 - return -ENODEV; 861 - 862 - pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap); 863 - 864 - return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : 865 - PCI_ATS_MAX_QDEP; 866 - }
+4 -2
drivers/pci/pci-acpi.c
··· 46 46 struct pci_dev *pci_dev = context; 47 47 48 48 if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { 49 + if (pci_dev->pme_poll) 50 + pci_dev->pme_poll = false; 51 + 49 52 pci_wakeup_event(pci_dev); 50 53 pci_check_pme_status(pci_dev); 51 54 pm_runtime_resume(&pci_dev->dev); ··· 285 282 { 286 283 struct acpi_device *dev; 287 284 acpi_handle handle; 288 - int error = -ENODEV; 289 285 290 286 if (!device_run_wake(phys_dev)) 291 287 return -EINVAL; ··· 304 302 acpi_disable_wakeup_device_power(dev); 305 303 } 306 304 307 - return error; 305 + return 0; 308 306 } 309 307 310 308 static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
+36 -23
drivers/pci/pci.c
··· 1407 1407 /** 1408 1408 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. 1409 1409 * @dev: Device to handle. 1410 - * @ign: Ignored. 1410 + * @pme_poll_reset: Whether or not to reset the device's pme_poll flag. 1411 1411 * 1412 1412 * Check if @dev has generated PME and queue a resume request for it in that 1413 1413 * case. 1414 1414 */ 1415 - static int pci_pme_wakeup(struct pci_dev *dev, void *ign) 1415 + static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) 1416 1416 { 1417 + if (pme_poll_reset && dev->pme_poll) 1418 + dev->pme_poll = false; 1419 + 1417 1420 if (pci_check_pme_status(dev)) { 1418 1421 pci_wakeup_event(dev); 1419 1422 pm_request_resume(&dev->dev); ··· 1431 1428 void pci_pme_wakeup_bus(struct pci_bus *bus) 1432 1429 { 1433 1430 if (bus) 1434 - pci_walk_bus(bus, pci_pme_wakeup, NULL); 1431 + pci_walk_bus(bus, pci_pme_wakeup, (void *)true); 1435 1432 } 1436 1433 1437 1434 /** ··· 1449 1446 1450 1447 static void pci_pme_list_scan(struct work_struct *work) 1451 1448 { 1452 - struct pci_pme_device *pme_dev; 1449 + struct pci_pme_device *pme_dev, *n; 1453 1450 1454 1451 mutex_lock(&pci_pme_list_mutex); 1455 1452 if (!list_empty(&pci_pme_list)) { 1456 - list_for_each_entry(pme_dev, &pci_pme_list, list) 1457 - pci_pme_wakeup(pme_dev->dev, NULL); 1458 - schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); 1453 + list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { 1454 + if (pme_dev->dev->pme_poll) { 1455 + pci_pme_wakeup(pme_dev->dev, NULL); 1456 + } else { 1457 + list_del(&pme_dev->list); 1458 + kfree(pme_dev); 1459 + } 1460 + } 1461 + if (!list_empty(&pci_pme_list)) 1462 + schedule_delayed_work(&pci_pme_work, 1463 + msecs_to_jiffies(PME_TIMEOUT)); 1459 1464 } 1460 1465 mutex_unlock(&pci_pme_list_mutex); 1461 - } 1462 - 1463 - /** 1464 - * pci_external_pme - is a device an external PCI PME source? 
1465 - * @dev: PCI device to check 1466 - * 1467 - */ 1468 - 1469 - static bool pci_external_pme(struct pci_dev *dev) 1470 - { 1471 - if (pci_is_pcie(dev) || dev->bus->number == 0) 1472 - return false; 1473 - return true; 1474 1466 } 1475 1467 1476 1468 /** ··· 1501 1503 hit, and the power savings from the devices will still be a 1502 1504 win. */ 1503 1505 1504 - if (pci_external_pme(dev)) { 1506 + if (dev->pme_poll) { 1505 1507 struct pci_pme_device *pme_dev; 1506 1508 if (enable) { 1507 1509 pme_dev = kmalloc(sizeof(struct pci_pme_device), ··· 1819 1821 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", 1820 1822 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); 1821 1823 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; 1824 + dev->pme_poll = true; 1822 1825 /* 1823 1826 * Make device's PM flags reflect the wake-up capability, but 1824 1827 * let the user space enable it to wake up the system as needed. ··· 3202 3203 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 3203 3204 goto out; 3204 3205 3205 - v = (ffs(rq) - 8) << 12; 3206 - 3207 3206 cap = pci_pcie_cap(dev); 3208 3207 if (!cap) 3209 3208 goto out; ··· 3209 3212 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 3210 3213 if (err) 3211 3214 goto out; 3215 + /* 3216 + * If using the "performance" PCIe config, we clamp the 3217 + * read rq size to the max packet size to prevent the 3218 + * host bridge generating requests larger than we can 3219 + * cope with 3220 + */ 3221 + if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 3222 + int mps = pcie_get_mps(dev); 3223 + 3224 + if (mps < 0) 3225 + return mps; 3226 + if (mps < rq) 3227 + rq = mps; 3228 + } 3229 + 3230 + v = (ffs(rq) - 8) << 12; 3212 3231 3213 3232 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { 3214 3233 ctl &= ~PCI_EXP_DEVCTL_READRQ;
+9
drivers/pci/pcie/pme.c
··· 84 84 list_for_each_entry(dev, &bus->devices, bus_list) { 85 85 /* Skip PCIe devices in case we started from a root port. */ 86 86 if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { 87 + if (dev->pme_poll) 88 + dev->pme_poll = false; 89 + 87 90 pci_wakeup_event(dev); 88 91 pm_request_resume(&dev->dev); 89 92 ret = true; ··· 145 142 146 143 /* First, check if the PME is from the root port itself. */ 147 144 if (port->devfn == devfn && port->bus->number == busnr) { 145 + if (port->pme_poll) 146 + port->pme_poll = false; 147 + 148 148 if (pci_check_pme_status(port)) { 149 149 pm_request_resume(&port->dev); 150 150 found = true; ··· 193 187 /* The device is there, but we have to check its PME status. */ 194 188 found = pci_check_pme_status(dev); 195 189 if (found) { 190 + if (dev->pme_poll) 191 + dev->pme_poll = false; 192 + 196 193 pci_wakeup_event(dev); 197 194 pm_request_resume(&dev->dev); 198 195 }
+31 -37
drivers/pci/probe.c
··· 1363 1363 1364 1364 static void pcie_write_mps(struct pci_dev *dev, int mps) 1365 1365 { 1366 - int rc, dev_mpss; 1367 - 1368 - dev_mpss = 128 << dev->pcie_mpss; 1366 + int rc; 1369 1367 1370 1368 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 1371 - if (dev->bus->self) { 1372 - dev_dbg(&dev->bus->dev, "Bus MPSS %d\n", 1373 - 128 << dev->bus->self->pcie_mpss); 1369 + mps = 128 << dev->pcie_mpss; 1374 1370 1375 - /* For "MPS Force Max", the assumption is made that 1371 + if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) 1372 + /* For "Performance", the assumption is made that 1376 1373 * downstream communication will never be larger than 1377 1374 * the MRRS. So, the MPS only needs to be configured 1378 1375 * for the upstream communication. This being the case, 1379 1376 * walk from the top down and set the MPS of the child 1380 1377 * to that of the parent bus. 1378 + * 1379 + * Configure the device MPS with the smaller of the 1380 + * device MPSS or the bridge MPS (which is assumed to be 1381 + * properly configured at this point to the largest 1382 + * allowable MPS based on its parent bus). 1381 1383 */ 1382 - mps = 128 << dev->bus->self->pcie_mpss; 1383 - if (mps > dev_mpss) 1384 - dev_warn(&dev->dev, "MPS configured higher than" 1385 - " maximum supported by the device. If" 1386 - " a bus issue occurs, try running with" 1387 - " pci=pcie_bus_safe.\n"); 1388 - } 1389 - 1390 - dev->pcie_mpss = ffs(mps) - 8; 1384 + mps = min(mps, pcie_get_mps(dev->bus->self)); 1391 1385 } 1392 1386 1393 1387 rc = pcie_set_mps(dev, mps); ··· 1389 1395 dev_err(&dev->dev, "Failed attempting to set the MPS\n"); 1390 1396 } 1391 1397 1392 - static void pcie_write_mrrs(struct pci_dev *dev, int mps) 1398 + static void pcie_write_mrrs(struct pci_dev *dev) 1393 1399 { 1394 - int rc, mrrs, dev_mpss; 1400 + int rc, mrrs; 1395 1401 1396 1402 /* In the "safe" case, do not configure the MRRS. 
There appear to be 1397 1403 * issues with setting MRRS to 0 on a number of devices. 1398 1404 */ 1399 - 1400 1405 if (pcie_bus_config != PCIE_BUS_PERFORMANCE) 1401 1406 return; 1402 1407 1403 - dev_mpss = 128 << dev->pcie_mpss; 1404 - 1405 1408 /* For Max performance, the MRRS must be set to the largest supported 1406 1409 * value. However, it cannot be configured larger than the MPS the 1407 - * device or the bus can support. This assumes that the largest MRRS 1408 - * available on the device cannot be smaller than the device MPSS. 1410 + * device or the bus can support. This should already be properly 1411 + * configured by a prior call to pcie_write_mps. 1409 1412 */ 1410 - mrrs = min(mps, dev_mpss); 1413 + mrrs = pcie_get_mps(dev); 1411 1414 1412 1415 /* MRRS is a R/W register. Invalid values can be written, but a 1413 1416 * subsequent read will verify if the value is acceptable or not. ··· 1412 1421 * shrink the value until it is acceptable to the HW. 1413 1422 */ 1414 1423 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { 1415 - dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value" 1416 - " to %d. If any issues are encountered, please try " 1417 - "running with pci=pcie_bus_safe\n", mrrs); 1418 1424 rc = pcie_set_readrq(dev, mrrs); 1419 - if (rc) 1420 - dev_err(&dev->dev, 1421 - "Failed attempting to set the MRRS\n"); 1425 + if (!rc) 1426 + break; 1422 1427 1428 + dev_warn(&dev->dev, "Failed attempting to set the MRRS\n"); 1423 1429 mrrs /= 2; 1424 1430 } 1431 + 1432 + if (mrrs < 128) 1433 + dev_err(&dev->dev, "MRRS was unable to be configured with a " 1434 + "safe value. 
If problems are experienced, try running " 1435 + "with pci=pcie_bus_safe.\n"); 1425 1436 } 1426 1437 1427 1438 static int pcie_bus_configure_set(struct pci_dev *dev, void *data) 1428 1439 { 1429 - int mps = 128 << *(u8 *)data; 1440 + int mps, orig_mps; 1430 1441 1431 1442 if (!pci_is_pcie(dev)) 1432 1443 return 0; 1433 1444 1434 - dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1435 - pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); 1445 + mps = 128 << *(u8 *)data; 1446 + orig_mps = pcie_get_mps(dev); 1436 1447 1437 1448 pcie_write_mps(dev, mps); 1438 - pcie_write_mrrs(dev, mps); 1449 + pcie_write_mrrs(dev); 1439 1450 1440 - dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1441 - pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); 1451 + dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), " 1452 + "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss, 1453 + orig_mps, pcie_get_readrq(dev)); 1442 1454 1443 1455 return 0; 1444 1456 } 1445 1457 1446 - /* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down, 1458 + /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, 1447 1459 * parents then children fashion. If this changes, then this code will not 1448 1460 * work as designed. 1449 1461 */
+97 -14
drivers/pci/quirks.c
··· 2745 2745 /* disable must be done via function #0 */ 2746 2746 if (PCI_FUNC(dev->devfn)) 2747 2747 return; 2748 - 2749 - pci_read_config_byte(dev, 0xCB, &disable); 2750 - 2751 - if (disable & 0x02) 2752 - return; 2753 - 2754 - pci_read_config_byte(dev, 0xCA, &write_enable); 2755 - pci_write_config_byte(dev, 0xCA, 0x57); 2756 - pci_write_config_byte(dev, 0xCB, disable | 0x02); 2757 - pci_write_config_byte(dev, 0xCA, write_enable); 2758 - 2759 - dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); 2760 - dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); 2761 - 2762 2748 /* 2763 2749 * RICOH 0xe823 SD/MMC card reader fails to recognize 2764 2750 * certain types of SD/MMC cards. Lowering the SD base ··· 2767 2781 2768 2782 dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n"); 2769 2783 } 2784 + 2785 + pci_read_config_byte(dev, 0xCB, &disable); 2786 + 2787 + if (disable & 0x02) 2788 + return; 2789 + 2790 + pci_read_config_byte(dev, 0xCA, &write_enable); 2791 + pci_write_config_byte(dev, 0xCA, 0x57); 2792 + pci_write_config_byte(dev, 0xCB, disable | 0x02); 2793 + pci_write_config_byte(dev, 0xCA, write_enable); 2794 + 2795 + dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); 2796 + dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); 2797 + 2770 2798 } 2771 2799 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2772 2800 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); ··· 2821 2821 } 2822 2822 } 2823 2823 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class); 2824 + 2825 + /* Some PCIe devices do not work reliably with the claimed maximum 2826 + * payload size supported. 
2827 + */ 2828 + static void __devinit fixup_mpss_256(struct pci_dev *dev) 2829 + { 2830 + dev->pcie_mpss = 1; /* 256 bytes */ 2831 + } 2832 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, 2833 + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); 2834 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, 2835 + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); 2836 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, 2837 + PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); 2838 + 2839 + /* Intel 5000 and 5100 Memory controllers have an errata with read completion 2840 + * coalescing (which is enabled by default on some BIOSes) and MPS of 256B. 2841 + * Since there is no way of knowing what the PCIE MPS on each fabric will be 2842 + * until all of the devices are discovered and buses walked, read completion 2843 + * coalescing must be disabled. Unfortunately, it cannot be re-enabled because 2844 + * it is possible to hotplug a device with MPS of 256B. 2845 + */ 2846 + static void __devinit quirk_intel_mc_errata(struct pci_dev *dev) 2847 + { 2848 + int err; 2849 + u16 rcc; 2850 + 2851 + if (pcie_bus_config == PCIE_BUS_TUNE_OFF) 2852 + return; 2853 + 2854 + /* Intel errata specifies bits to change but does not say what they are. 2855 + * Keeping them magical until such time as the registers and values can 2856 + * be explained. 
2857 + */ 2858 + err = pci_read_config_word(dev, 0x48, &rcc); 2859 + if (err) { 2860 + dev_err(&dev->dev, "Error attempting to read the read " 2861 + "completion coalescing register.\n"); 2862 + return; 2863 + } 2864 + 2865 + if (!(rcc & (1 << 10))) 2866 + return; 2867 + 2868 + rcc &= ~(1 << 10); 2869 + 2870 + err = pci_write_config_word(dev, 0x48, rcc); 2871 + if (err) { 2872 + dev_err(&dev->dev, "Error attempting to write the read " 2873 + "completion coalescing register.\n"); 2874 + return; 2875 + } 2876 + 2877 + pr_info_once("Read completion coalescing disabled due to hardware " 2878 + "errata relating to 256B MPS.\n"); 2879 + } 2880 + /* Intel 5000 series memory controllers and ports 2-7 */ 2881 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata); 2882 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata); 2883 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata); 2884 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata); 2885 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata); 2886 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata); 2887 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata); 2888 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata); 2889 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata); 2890 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata); 2891 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata); 2892 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata); 2893 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata); 2894 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata); 2895 + /* Intel 5100 series memory controllers and ports 2-7 */ 2896 + 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata); 2897 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata); 2898 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata); 2899 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata); 2900 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata); 2901 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata); 2902 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata); 2903 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata); 2904 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata); 2905 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); 2906 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); 2824 2907 2825 2908 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2826 2909 struct pci_fixup *end)
+1 -1
drivers/pci/setup-bus.c
··· 426 426 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); 427 427 } 428 428 429 - static void pci_setup_bridge(struct pci_bus *bus) 429 + void pci_setup_bridge(struct pci_bus *bus) 430 430 { 431 431 unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | 432 432 IORESOURCE_PREFETCH;
+2
include/acpi/acpiosxf.h
··· 189 189 /* 190 190 * Threads and Scheduling 191 191 */ 192 + extern struct workqueue_struct *kacpi_hotplug_wq; 193 + 192 194 acpi_thread_id acpi_os_get_thread_id(void); 193 195 194 196 acpi_status
+75
include/linux/pci-ats.h
··· 1 1 #ifndef LINUX_PCI_ATS_H 2 2 #define LINUX_PCI_ATS_H 3 3 4 + #include <linux/pci.h> 5 + 4 6 /* Address Translation Service */ 5 7 struct pci_ats { 6 8 int pos; /* capability position */ ··· 17 15 extern int pci_enable_ats(struct pci_dev *dev, int ps); 18 16 extern void pci_disable_ats(struct pci_dev *dev); 19 17 extern int pci_ats_queue_depth(struct pci_dev *dev); 18 + 20 19 /** 21 20 * pci_ats_enabled - query the ATS status 22 21 * @dev: the PCI device ··· 51 48 } 52 49 53 50 #endif /* CONFIG_PCI_IOV */ 51 + 52 + #ifdef CONFIG_PCI_PRI 53 + 54 + extern int pci_enable_pri(struct pci_dev *pdev, u32 reqs); 55 + extern void pci_disable_pri(struct pci_dev *pdev); 56 + extern bool pci_pri_enabled(struct pci_dev *pdev); 57 + extern int pci_reset_pri(struct pci_dev *pdev); 58 + extern bool pci_pri_stopped(struct pci_dev *pdev); 59 + extern int pci_pri_status(struct pci_dev *pdev); 60 + 61 + #else /* CONFIG_PCI_PRI */ 62 + 63 + static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs) 64 + { 65 + return -ENODEV; 66 + } 67 + 68 + static inline void pci_disable_pri(struct pci_dev *pdev) 69 + { 70 + } 71 + 72 + static inline bool pci_pri_enabled(struct pci_dev *pdev) 73 + { 74 + return false; 75 + } 76 + 77 + static inline int pci_reset_pri(struct pci_dev *pdev) 78 + { 79 + return -ENODEV; 80 + } 81 + 82 + static inline bool pci_pri_stopped(struct pci_dev *pdev) 83 + { 84 + return true; 85 + } 86 + 87 + static inline int pci_pri_status(struct pci_dev *pdev) 88 + { 89 + return -ENODEV; 90 + } 91 + #endif /* CONFIG_PCI_PRI */ 92 + 93 + #ifdef CONFIG_PCI_PASID 94 + 95 + extern int pci_enable_pasid(struct pci_dev *pdev, int features); 96 + extern void pci_disable_pasid(struct pci_dev *pdev); 97 + extern int pci_pasid_features(struct pci_dev *pdev); 98 + extern int pci_max_pasids(struct pci_dev *pdev); 99 + 100 + #else /* CONFIG_PCI_PASID */ 101 + 102 + static inline int pci_enable_pasid(struct pci_dev *pdev, int features) 103 + { 104 + return -EINVAL; 105 + } 106 + 
107 + static inline void pci_disable_pasid(struct pci_dev *pdev) 108 + { 109 + } 110 + 111 + static inline int pci_pasid_features(struct pci_dev *pdev) 112 + { 113 + return -EINVAL; 114 + } 115 + 116 + static inline int pci_max_pasids(struct pci_dev *pdev) 117 + { 118 + return -EINVAL; 119 + } 120 + 121 + #endif /* CONFIG_PCI_PASID */ 122 + 54 123 55 124 #endif /* LINUX_PCI_ATS_H*/
+2
include/linux/pci.h
··· 275 275 unsigned int pme_support:5; /* Bitmask of states from which PME# 276 276 can be generated */ 277 277 unsigned int pme_interrupt:1; 278 + unsigned int pme_poll:1; /* Poll device's PME status bit */ 278 279 unsigned int d1_support:1; /* Low power state D1 is supported */ 279 280 unsigned int d2_support:1; /* Low power state D2 is supported */ 280 281 unsigned int no_d1d2:1; /* Only allow D0 and D3 */ ··· 958 957 int pci_cfg_space_size_ext(struct pci_dev *dev); 959 958 int pci_cfg_space_size(struct pci_dev *dev); 960 959 unsigned char pci_bus_max_busnr(struct pci_bus *bus); 960 + void pci_setup_bridge(struct pci_bus *bus); 961 961 962 962 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) 963 963 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
+5
include/linux/pci_ids.h
··· 2302 2302 #define PCI_DEVICE_ID_RENESAS_SH7785 0x0007 2303 2303 #define PCI_DEVICE_ID_RENESAS_SH7786 0x0010 2304 2304 2305 + #define PCI_VENDOR_ID_SOLARFLARE 0x1924 2306 + #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0 0x0703 2307 + #define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1 0x6703 2308 + #define PCI_DEVICE_ID_SOLARFLARE_SFC4000B 0x0710 2309 + 2305 2310 #define PCI_VENDOR_ID_TDI 0x192E 2306 2311 #define PCI_DEVICE_ID_TDI_EHCI 0x0101 2307 2312
+20
include/linux/pci_regs.h
··· 663 663 #define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */ 664 664 #define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */ 665 665 666 + /* Page Request Interface */ 667 + #define PCI_PRI_CAP 0x13 /* PRI capability ID */ 668 + #define PCI_PRI_CONTROL_OFF 0x04 /* Offset of control register */ 669 + #define PCI_PRI_STATUS_OFF 0x06 /* Offset of status register */ 670 + #define PCI_PRI_ENABLE 0x0001 /* Enable mask */ 671 + #define PCI_PRI_RESET 0x0002 /* Reset bit mask */ 672 + #define PCI_PRI_STATUS_RF 0x0001 /* Request Failure */ 673 + #define PCI_PRI_STATUS_UPRGI 0x0002 /* Unexpected PRG index */ 674 + #define PCI_PRI_STATUS_STOPPED 0x0100 /* PRI Stopped */ 675 + #define PCI_PRI_MAX_REQ_OFF 0x08 /* Cap offset for max reqs supported */ 676 + #define PCI_PRI_ALLOC_REQ_OFF 0x0c /* Cap offset for max reqs allowed */ 677 + 678 + /* PASID capability */ 679 + #define PCI_PASID_CAP 0x1b /* PASID capability ID */ 680 + #define PCI_PASID_CAP_OFF 0x04 /* PASID feature register */ 681 + #define PCI_PASID_CONTROL_OFF 0x06 /* PASID control register */ 682 + #define PCI_PASID_ENABLE 0x01 /* Enable/Supported bit */ 683 + #define PCI_PASID_EXEC 0x02 /* Exec permissions Enable/Supported */ 684 + #define PCI_PASID_PRIV 0x04 /* Priviledge Mode Enable/Support */ 685 + 666 686 /* Single Root I/O Virtualization */ 667 687 #define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ 668 688 #define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */