Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'remotes/lorenzo/pci/endpoint'

- Add max-virtual-functions to endpoint binding (Kishon Vijay Abraham I)

- Add pci_epf_add_vepf() API to add virtual function to endpoint (Kishon
Vijay Abraham I)

- Add pci_epf_vepf_link() to link virtual function to endpoint physical
function (Kishon Vijay Abraham I)

- Add virtual function number to pci_epc_ops endpoint ops interfaces
(Kishon Vijay Abraham I)

- Simplify register base address computation for endpoint BAR configuration
(Kishon Vijay Abraham I)

- Add support to configure virtual functions in cadence endpoint driver
(Kishon Vijay Abraham I)

- Add SR-IOV configuration to endpoint test driver (Kishon Vijay Abraham I)

- Document configfs usage to create virtual functions for endpoints (Kishon
Vijay Abraham I)

* remotes/lorenzo/pci/endpoint:
Documentation: PCI: endpoint/pci-endpoint-cfs: Guide to use SR-IOV
misc: pci_endpoint_test: Populate sriov_configure ops to configure SR-IOV device
PCI: cadence: Add support to configure virtual functions
PCI: cadence: Simplify code to get register base address for configuring BAR
PCI: endpoint: Add virtual function number in pci_epc ops
PCI: endpoint: Add support to link a physical function to a virtual function
PCI: endpoint: Add support to add virtual function in endpoint core
dt-bindings: PCI: pci-ep: Add binding to specify virtual function

+616 -225
+11 -1
Documentation/PCI/endpoint/pci-endpoint-cfs.rst
··· 43 43 .. <EPF Driver1>/ 44 44 ... <EPF Device 11>/ 45 45 ... <EPF Device 21>/ 46 + ... <EPF Device 31>/ 46 47 .. <EPF Driver2>/ 47 48 ... <EPF Device 12>/ 48 49 ... <EPF Device 22>/ ··· 69 68 ... subsys_vendor_id 70 69 ... subsys_id 71 70 ... interrupt_pin 71 + ... <Symlink EPF Device 31>/ 72 72 ... primary/ 73 73 ... <Symlink EPC Device1>/ 74 74 ... secondary/ ··· 80 78 interface should be added in 'primary' directory and symlink of endpoint 81 79 controller connected to secondary interface should be added in 'secondary' 82 80 directory. 81 + 82 + The <EPF Device> directory can have a list of symbolic links 83 + (<Symlink EPF Device 31>) to other <EPF Device>. These symbolic links should 84 + be created by the user to represent the virtual functions that are bound to 85 + the physical function. In the above directory structure <EPF Device 11> is a 86 + physical function and <EPF Device 31> is a virtual function. An EPF device once 87 + it's linked to another EPF device, cannot be linked to a EPC device. 83 88 84 89 EPC Device 85 90 ========== ··· 107 98 108 99 The <EPC Device> directory will have a list of symbolic links to 109 100 <EPF Device>. These symbolic links should be created by the user to 110 - represent the functions present in the endpoint device. 101 + represent the functions present in the endpoint device. Only <EPF Device> 102 + that represents a physical function can be linked to a EPC device. 111 103 112 104 The <EPC Device> directory will also have a *start* field. Once 113 105 "1" is written to this field, the endpoint device will be ready to
+7
Documentation/devicetree/bindings/pci/pci-ep.yaml
··· 23 23 default: 1 24 24 maximum: 255 25 25 26 + max-virtual-functions: 27 + description: Array representing the number of virtual functions corresponding to each physical 28 + function 29 + $ref: /schemas/types.yaml#/definitions/uint8-array 30 + minItems: 1 31 + maxItems: 255 32 + 26 33 max-link-speed: 27 34 $ref: /schemas/types.yaml#/definitions/uint32 28 35 enum: [ 1, 2, 3, 4 ]
+1
drivers/misc/pci_endpoint_test.c
··· 986 986 .id_table = pci_endpoint_test_tbl, 987 987 .probe = pci_endpoint_test_probe, 988 988 .remove = pci_endpoint_test_remove, 989 + .sriov_configure = pci_sriov_configure_simple, 989 990 }; 990 991 module_pci_driver(pci_endpoint_test_driver); 991 992
+143 -53
drivers/pci/controller/cadence/pcie-cadence-ep.c
··· 16 16 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1 17 17 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3 18 18 19 - static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, 19 + static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn) 20 + { 21 + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET; 22 + u32 first_vf_offset, stride; 23 + 24 + if (vfn == 0) 25 + return fn; 26 + 27 + first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET); 28 + stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE); 29 + fn = fn + first_vf_offset + ((vfn - 1) * stride); 30 + 31 + return fn; 32 + } 33 + 34 + static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, 20 35 struct pci_epf_header *hdr) 21 36 { 22 37 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); 38 + u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET; 23 39 struct cdns_pcie *pcie = &ep->pcie; 40 + u32 reg; 41 + 42 + if (vfn > 1) { 43 + dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n"); 44 + return -EINVAL; 45 + } else if (vfn == 1) { 46 + reg = cap + PCI_SRIOV_VF_DID; 47 + cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid); 48 + return 0; 49 + } 24 50 25 51 cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid); 26 52 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid); ··· 73 47 return 0; 74 48 } 75 49 76 - static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, 50 + static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn, 77 51 struct pci_epf_bar *epf_bar) 78 52 { 79 53 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); ··· 118 92 119 93 addr0 = lower_32_bits(bar_phys); 120 94 addr1 = upper_32_bits(bar_phys); 95 + 96 + if (vfn == 1) 97 + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn); 98 + else 99 + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn); 100 + b = (bar < BAR_4) ? 
bar : bar - BAR_4; 101 + 102 + if (vfn == 0 || vfn == 1) { 103 + cfg = cdns_pcie_readl(pcie, reg); 104 + cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | 105 + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); 106 + cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | 107 + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); 108 + cdns_pcie_writel(pcie, reg, cfg); 109 + } 110 + 111 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 121 112 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 122 113 addr0); 123 114 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 124 115 addr1); 125 116 126 - if (bar < BAR_4) { 127 - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); 128 - b = bar; 129 - } else { 130 - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); 131 - b = bar - BAR_4; 132 - } 133 - 134 - cfg = cdns_pcie_readl(pcie, reg); 135 - cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | 136 - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); 137 - cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | 138 - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); 139 - cdns_pcie_writel(pcie, reg, cfg); 140 - 117 + if (vfn > 0) 118 + epf = &epf->epf[vfn - 1]; 141 119 epf->epf_bar[bar] = epf_bar; 142 120 143 121 return 0; 144 122 } 145 123 146 - static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, 124 + static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, 147 125 struct pci_epf_bar *epf_bar) 148 126 { 149 127 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); ··· 156 126 enum pci_barno bar = epf_bar->barno; 157 127 u32 reg, cfg, b, ctrl; 158 128 159 - if (bar < BAR_4) { 160 - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); 161 - b = bar; 162 - } else { 163 - reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); 164 - b = bar - BAR_4; 129 + if (vfn == 1) 130 + reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn); 131 + else 132 + reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn); 133 + b = (bar < BAR_4) ? 
bar : bar - BAR_4; 134 + 135 + if (vfn == 0 || vfn == 1) { 136 + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; 137 + cfg = cdns_pcie_readl(pcie, reg); 138 + cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | 139 + CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); 140 + cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); 141 + cdns_pcie_writel(pcie, reg, cfg); 165 142 } 166 143 167 - ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; 168 - cfg = cdns_pcie_readl(pcie, reg); 169 - cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | 170 - CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); 171 - cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); 172 - cdns_pcie_writel(pcie, reg, cfg); 173 - 144 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 174 145 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); 175 146 cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); 176 147 148 + if (vfn > 0) 149 + epf = &epf->epf[vfn - 1]; 177 150 epf->epf_bar[bar] = NULL; 178 151 } 179 152 180 - static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, 181 - u64 pci_addr, size_t size) 153 + static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, 154 + phys_addr_t addr, u64 pci_addr, size_t size) 182 155 { 183 156 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); 184 157 struct cdns_pcie *pcie = &ep->pcie; ··· 194 161 return -EINVAL; 195 162 } 196 163 164 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 197 165 cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size); 198 166 199 167 set_bit(r, &ep->ob_region_map); ··· 203 169 return 0; 204 170 } 205 171 206 - static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, 172 + static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, 207 173 phys_addr_t addr) 208 174 { 209 175 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); ··· 223 189 clear_bit(r, &ep->ob_region_map); 224 190 } 225 191 226 - static int 
cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) 192 + static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc) 227 193 { 228 194 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); 229 195 struct cdns_pcie *pcie = &ep->pcie; 230 196 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; 231 197 u16 flags; 198 + 199 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 232 200 233 201 /* 234 202 * Set the Multiple Message Capable bitfield into the Message Control ··· 245 209 return 0; 246 210 } 247 211 248 - static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) 212 + static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) 249 213 { 250 214 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); 251 215 struct cdns_pcie *pcie = &ep->pcie; 252 216 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; 253 217 u16 flags, mme; 218 + 219 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 254 220 255 221 /* Validate that the MSI feature is actually enabled. */ 256 222 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); ··· 268 230 return mme; 269 231 } 270 232 271 - static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) 233 + static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 272 234 { 273 235 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); 274 236 struct cdns_pcie *pcie = &ep->pcie; 275 237 u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; 276 238 u32 val, reg; 239 + 240 + func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no); 277 241 278 242 reg = cap + PCI_MSIX_FLAGS; 279 243 val = cdns_pcie_ep_fn_readw(pcie, func_no, reg); ··· 287 247 return val; 288 248 } 289 249 290 - static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts, 291 - enum pci_barno bir, u32 offset) 250 + static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, 251 + u16 interrupts, enum pci_barno bir, 252 + u32 offset) 292 253 { 293 254 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); 294 255 struct cdns_pcie *pcie = 
&ep->pcie; 295 256 u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; 296 257 u32 val, reg; 258 + 259 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 297 260 298 261 reg = cap + PCI_MSIX_FLAGS; 299 262 val = cdns_pcie_ep_fn_readw(pcie, fn, reg); ··· 317 274 return 0; 318 275 } 319 276 320 - static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, 321 - u8 intx, bool is_asserted) 277 + static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, 278 + bool is_asserted) 322 279 { 323 280 struct cdns_pcie *pcie = &ep->pcie; 324 281 unsigned long flags; ··· 360 317 writel(0, ep->irq_cpu_addr + offset); 361 318 } 362 319 363 - static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx) 320 + static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, 321 + u8 intx) 364 322 { 365 323 u16 cmd; 366 324 ··· 378 334 return 0; 379 335 } 380 336 381 - static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, 337 + static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, 382 338 u8 interrupt_num) 383 339 { 384 340 struct cdns_pcie *pcie = &ep->pcie; ··· 386 342 u16 flags, mme, data, data_mask; 387 343 u8 msi_count; 388 344 u64 pci_addr, pci_addr_mask = 0xff; 345 + 346 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 389 347 390 348 /* Check whether the MSI feature has been enabled by the PCI host. */ 391 349 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); ··· 428 382 return 0; 429 383 } 430 384 431 - static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, 385 + static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn, 432 386 phys_addr_t addr, u8 interrupt_num, 433 387 u32 entry_size, u32 *msi_data, 434 388 u32 *msi_addr_offset) ··· 441 395 u8 msi_count; 442 396 int ret; 443 397 int i; 398 + 399 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 444 400 445 401 /* Check whether the MSI feature has been enabled by the PCI host. 
*/ 446 402 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); ··· 467 419 pci_addr &= GENMASK_ULL(63, 2); 468 420 469 421 for (i = 0; i < interrupt_num; i++) { 470 - ret = cdns_pcie_ep_map_addr(epc, fn, addr, 422 + ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr, 471 423 pci_addr & ~pci_addr_mask, 472 424 entry_size); 473 425 if (ret) ··· 481 433 return 0; 482 434 } 483 435 484 - static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, 436 + static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn, 485 437 u16 interrupt_num) 486 438 { 487 439 u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; ··· 494 446 u16 flags; 495 447 u8 bir; 496 448 449 + epf = &ep->epf[fn]; 450 + if (vfn > 0) 451 + epf = &epf->epf[vfn - 1]; 452 + 453 + fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn); 454 + 497 455 /* Check whether the MSI-X feature has been enabled by the PCI host. */ 498 456 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS); 499 457 if (!(flags & PCI_MSIX_FLAGS_ENABLE)) ··· 510 456 bir = tbl_offset & PCI_MSIX_TABLE_BIR; 511 457 tbl_offset &= PCI_MSIX_TABLE_OFFSET; 512 458 513 - epf = &ep->epf[fn]; 514 459 msix_tbl = epf->epf_bar[bir]->addr + tbl_offset; 515 460 msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; 516 461 msg_data = msix_tbl[(interrupt_num - 1)].msg_data; ··· 531 478 return 0; 532 479 } 533 480 534 - static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, 481 + static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, 535 482 enum pci_epc_irq_type type, 536 483 u16 interrupt_num) 537 484 { 538 485 struct cdns_pcie_ep *ep = epc_get_drvdata(epc); 486 + struct cdns_pcie *pcie = &ep->pcie; 487 + struct device *dev = pcie->dev; 539 488 540 489 switch (type) { 541 490 case PCI_EPC_IRQ_LEGACY: 542 - return cdns_pcie_ep_send_legacy_irq(ep, fn, 0); 491 + if (vfn > 0) { 492 + dev_err(dev, "Cannot raise legacy interrupts for VF\n"); 493 + return -EINVAL; 494 + } 495 + return cdns_pcie_ep_send_legacy_irq(ep, 
fn, vfn, 0); 543 496 544 497 case PCI_EPC_IRQ_MSI: 545 - return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); 498 + return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num); 546 499 547 500 case PCI_EPC_IRQ_MSIX: 548 - return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num); 501 + return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num); 549 502 550 503 default: 551 504 break; ··· 582 523 return 0; 583 524 } 584 525 526 + static const struct pci_epc_features cdns_pcie_epc_vf_features = { 527 + .linkup_notifier = false, 528 + .msi_capable = true, 529 + .msix_capable = true, 530 + .align = 65536, 531 + }; 532 + 585 533 static const struct pci_epc_features cdns_pcie_epc_features = { 586 534 .linkup_notifier = false, 587 535 .msi_capable = true, ··· 597 531 }; 598 532 599 533 static const struct pci_epc_features* 600 - cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) 534 + cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 601 535 { 602 - return &cdns_pcie_epc_features; 536 + if (!vfunc_no) 537 + return &cdns_pcie_epc_features; 538 + 539 + return &cdns_pcie_epc_vf_features; 603 540 } 604 541 605 542 static const struct pci_epc_ops cdns_pcie_epc_ops = { ··· 628 559 struct platform_device *pdev = to_platform_device(dev); 629 560 struct device_node *np = dev->of_node; 630 561 struct cdns_pcie *pcie = &ep->pcie; 562 + struct cdns_pcie_epf *epf; 631 563 struct resource *res; 632 564 struct pci_epc *epc; 633 565 int ret; 566 + int i; 634 567 635 568 pcie->is_rc = false; 636 569 ··· 676 605 GFP_KERNEL); 677 606 if (!ep->epf) 678 607 return -ENOMEM; 608 + 609 + epc->max_vfs = devm_kcalloc(dev, epc->max_functions, 610 + sizeof(*epc->max_vfs), GFP_KERNEL); 611 + if (!epc->max_vfs) 612 + return -ENOMEM; 613 + 614 + ret = of_property_read_u8_array(np, "max-virtual-functions", 615 + epc->max_vfs, epc->max_functions); 616 + if (ret == 0) { 617 + for (i = 0; i < epc->max_functions; i++) { 618 + epf = &ep->epf[i]; 619 + if (epc->max_vfs[i] 
== 0) 620 + continue; 621 + epf->epf = devm_kcalloc(dev, epc->max_vfs[i], 622 + sizeof(*ep->epf), GFP_KERNEL); 623 + if (!epf->epf) 624 + return -ENOMEM; 625 + } 626 + } 679 627 680 628 ret = pci_epc_mem_init(epc, pcie->mem_res->start, 681 629 resource_size(pcie->mem_res), PAGE_SIZE);
+12
drivers/pci/controller/cadence/pcie-cadence.h
··· 8 8 9 9 #include <linux/kernel.h> 10 10 #include <linux/pci.h> 11 + #include <linux/pci-epf.h> 11 12 #include <linux/phy/phy.h> 12 13 13 14 /* Parameters for the waiting for link up routine */ ··· 47 46 #define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8 48 47 49 48 /* Endpoint Function f BAR b Configuration Registers */ 49 + #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \ 50 + (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn)) 50 51 #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \ 51 52 (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008) 52 53 #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \ 53 54 (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008) 55 + #define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \ 56 + (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn)) 57 + #define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \ 58 + (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008) 59 + #define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \ 60 + (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008) 54 61 #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ 55 62 (GENMASK(4, 0) << ((b) * 8)) 56 63 #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ ··· 123 114 124 115 #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 125 116 #define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0 117 + #define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200 126 118 127 119 /* 128 120 * Root Port Registers (PCI configuration space for the root port function) ··· 328 318 329 319 /** 330 320 * struct cdns_pcie_epf - Structure to hold info about endpoint function 321 + * @epf: Info about virtual functions attached to the physical function 331 322 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers 332 323 */ 333 324 struct cdns_pcie_epf { 325 + struct cdns_pcie_epf *epf; 334 326 struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; 335 327 }; 336 328
+18 -18
drivers/pci/controller/dwc/pcie-designware-ep.c
··· 125 125 return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); 126 126 } 127 127 128 - static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, 128 + static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 129 129 struct pci_epf_header *hdr) 130 130 { 131 131 struct dw_pcie_ep *ep = epc_get_drvdata(epc); ··· 202 202 return 0; 203 203 } 204 204 205 - static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, 205 + static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 206 206 struct pci_epf_bar *epf_bar) 207 207 { 208 208 struct dw_pcie_ep *ep = epc_get_drvdata(epc); ··· 217 217 ep->epf_bar[bar] = NULL; 218 218 } 219 219 220 - static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, 220 + static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 221 221 struct pci_epf_bar *epf_bar) 222 222 { 223 223 int ret; ··· 276 276 return -EINVAL; 277 277 } 278 278 279 - static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, 279 + static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 280 280 phys_addr_t addr) 281 281 { 282 282 int ret; ··· 292 292 clear_bit(atu_index, ep->ob_window_map); 293 293 } 294 294 295 - static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, 296 - phys_addr_t addr, 297 - u64 pci_addr, size_t size) 295 + static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 296 + phys_addr_t addr, u64 pci_addr, size_t size) 298 297 { 299 298 int ret; 300 299 struct dw_pcie_ep *ep = epc_get_drvdata(epc); ··· 308 309 return 0; 309 310 } 310 311 311 - static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) 312 + static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 312 313 { 313 314 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 314 315 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); ··· 332 333 return val; 333 334 } 334 335 335 - static int dw_pcie_ep_set_msi(struct pci_epc 
*epc, u8 func_no, u8 interrupts) 336 + static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 337 + u8 interrupts) 336 338 { 337 339 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 338 340 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); ··· 358 358 return 0; 359 359 } 360 360 361 - static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) 361 + static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 362 362 { 363 363 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 364 364 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); ··· 382 382 return val; 383 383 } 384 384 385 - static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, 386 - enum pci_barno bir, u32 offset) 385 + static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 386 + u16 interrupts, enum pci_barno bir, u32 offset) 387 387 { 388 388 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 389 389 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); ··· 418 418 return 0; 419 419 } 420 420 421 - static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, 421 + static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 422 422 enum pci_epc_irq_type type, u16 interrupt_num) 423 423 { 424 424 struct dw_pcie_ep *ep = epc_get_drvdata(epc); ··· 450 450 } 451 451 452 452 static const struct pci_epc_features* 453 - dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) 453 + dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 454 454 { 455 455 struct dw_pcie_ep *ep = epc_get_drvdata(epc); 456 456 ··· 525 525 aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); 526 526 msg_addr = ((u64)msg_addr_upper) << 32 | 527 527 (msg_addr_lower & ~aligned_offset); 528 - ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, 528 + ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, 529 529 epc->mem->window.page_size); 530 530 if (ret) 531 531 return ret; 532 532 533 533 
writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); 534 534 535 - dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); 535 + dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); 536 536 537 537 return 0; 538 538 } ··· 593 593 } 594 594 595 595 aligned_offset = msg_addr & (epc->mem->window.page_size - 1); 596 - ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, 596 + ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, 597 597 epc->mem->window.page_size); 598 598 if (ret) 599 599 return ret; 600 600 601 601 writel(msg_data, ep->msi_mem + aligned_offset); 602 602 603 - dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); 603 + dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); 604 604 605 605 return 0; 606 606 }
+10 -9
drivers/pci/controller/pcie-rcar-ep.c
··· 159 159 return 0; 160 160 } 161 161 162 - static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, 162 + static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, 163 163 struct pci_epf_header *hdr) 164 164 { 165 165 struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); ··· 195 195 return 0; 196 196 } 197 197 198 - static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, 198 + static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 199 199 struct pci_epf_bar *epf_bar) 200 200 { 201 201 int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT; ··· 246 246 return 0; 247 247 } 248 248 249 - static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, 249 + static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, 250 250 struct pci_epf_bar *epf_bar) 251 251 { 252 252 struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); ··· 259 259 clear_bit(atu_index + 1, ep->ib_window_map); 260 260 } 261 261 262 - static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts) 262 + static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, 263 + u8 interrupts) 263 264 { 264 265 struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); 265 266 struct rcar_pcie *pcie = &ep->pcie; ··· 273 272 return 0; 274 273 } 275 274 276 - static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) 275 + static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) 277 276 { 278 277 struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); 279 278 struct rcar_pcie *pcie = &ep->pcie; ··· 286 285 return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET); 287 286 } 288 287 289 - static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, 288 + static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, 290 289 phys_addr_t addr, u64 pci_addr, size_t size) 291 290 { 292 291 struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); ··· 323 322 return 0; 324 323 } 325 324 326 - static void 
rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, 325 + static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, 327 326 phys_addr_t addr) 328 327 { 329 328 struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); ··· 404 403 return 0; 405 404 } 406 405 407 - static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, 406 + static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, 408 407 enum pci_epc_irq_type type, 409 408 u16 interrupt_num) 410 409 { ··· 452 451 }; 453 452 454 453 static const struct pci_epc_features* 455 - rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) 454 + rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 456 455 { 457 456 return &rcar_pcie_epc_features; 458 457 }
+9 -9
drivers/pci/controller/pcie-rockchip-ep.c
··· 122 122 ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r)); 123 123 } 124 124 125 - static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, 125 + static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, 126 126 struct pci_epf_header *hdr) 127 127 { 128 128 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); ··· 159 159 return 0; 160 160 } 161 161 162 - static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, 162 + static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn, 163 163 struct pci_epf_bar *epf_bar) 164 164 { 165 165 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); ··· 227 227 return 0; 228 228 } 229 229 230 - static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, 230 + static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, 231 231 struct pci_epf_bar *epf_bar) 232 232 { 233 233 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); ··· 256 256 ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); 257 257 } 258 258 259 - static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, 259 + static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, 260 260 phys_addr_t addr, u64 pci_addr, 261 261 size_t size) 262 262 { ··· 284 284 return 0; 285 285 } 286 286 287 - static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, 287 + static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, 288 288 phys_addr_t addr) 289 289 { 290 290 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); ··· 308 308 clear_bit(r, &ep->ob_region_map); 309 309 } 310 310 311 - static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, 311 + static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, 312 312 u8 multi_msg_cap) 313 313 { 314 314 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); ··· 329 329 return 0; 330 330 } 331 331 332 - static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) 332 + static int rockchip_pcie_ep_get_msi(struct pci_epc 
*epc, u8 fn, u8 vfn) 333 333 { 334 334 struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); 335 335 struct rockchip_pcie *rockchip = &ep->rockchip; ··· 471 471 return 0; 472 472 } 473 473 474 - static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, 474 + static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn, 475 475 enum pci_epc_irq_type type, 476 476 u16 interrupt_num) 477 477 { ··· 510 510 }; 511 511 512 512 static const struct pci_epc_features* 513 - rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) 513 + rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 514 514 { 515 515 return &rockchip_pcie_epc_features; 516 516 }
+54 -35
drivers/pci/endpoint/functions/pci-epf-ntb.c
··· 87 87 88 88 struct epf_ntb_epc { 89 89 u8 func_no; 90 + u8 vfunc_no; 90 91 bool linkup; 91 92 bool is_msix; 92 93 int msix_bar; ··· 144 143 struct epf_ntb_epc *ntb_epc; 145 144 struct epf_ntb_ctrl *ctrl; 146 145 struct pci_epc *epc; 146 + u8 func_no, vfunc_no; 147 147 bool is_msix; 148 - u8 func_no; 149 148 int ret; 150 149 151 150 for (type = PRIMARY_INTERFACE; type <= SECONDARY_INTERFACE; type++) { 152 151 ntb_epc = ntb->epc[type]; 153 152 epc = ntb_epc->epc; 154 153 func_no = ntb_epc->func_no; 154 + vfunc_no = ntb_epc->vfunc_no; 155 155 is_msix = ntb_epc->is_msix; 156 156 ctrl = ntb_epc->reg; 157 157 if (link_up) ··· 160 158 else 161 159 ctrl->link_status &= ~LINK_STATUS_UP; 162 160 irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI; 163 - ret = pci_epc_raise_irq(epc, func_no, irq_type, 1); 161 + ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1); 164 162 if (ret) { 165 163 dev_err(&epc->dev, 166 164 "%s intf: Failed to raise Link Up IRQ\n", ··· 240 238 enum pci_barno peer_barno; 241 239 struct epf_ntb_ctrl *ctrl; 242 240 phys_addr_t phys_addr; 241 + u8 func_no, vfunc_no; 243 242 struct pci_epc *epc; 244 243 u64 addr, size; 245 244 int ret = 0; 246 - u8 func_no; 247 245 248 246 ntb_epc = ntb->epc[type]; 249 247 epc = ntb_epc->epc; ··· 269 267 } 270 268 271 269 func_no = ntb_epc->func_no; 270 + vfunc_no = ntb_epc->vfunc_no; 272 271 273 - ret = pci_epc_map_addr(epc, func_no, phys_addr, addr, size); 272 + ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, addr, size); 274 273 if (ret) 275 274 dev_err(&epc->dev, 276 275 "%s intf: Failed to map memory window %d address\n", ··· 299 296 enum pci_barno peer_barno; 300 297 struct epf_ntb_ctrl *ctrl; 301 298 phys_addr_t phys_addr; 299 + u8 func_no, vfunc_no; 302 300 struct pci_epc *epc; 303 - u8 func_no; 304 301 305 302 ntb_epc = ntb->epc[type]; 306 303 epc = ntb_epc->epc; ··· 314 311 if (mw + NTB_MW_OFFSET == BAR_DB_MW1) 315 312 phys_addr += ctrl->mw1_offset; 316 313 func_no = ntb_epc->func_no; 
314 + vfunc_no = ntb_epc->vfunc_no; 317 315 318 - pci_epc_unmap_addr(epc, func_no, phys_addr); 316 + pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr); 319 317 } 320 318 321 319 /** ··· 389 385 struct epf_ntb_ctrl *peer_ctrl; 390 386 enum pci_barno peer_barno; 391 387 phys_addr_t phys_addr; 388 + u8 func_no, vfunc_no; 392 389 struct pci_epc *epc; 393 - u8 func_no; 394 390 int ret, i; 395 391 396 392 ntb_epc = ntb->epc[type]; ··· 404 400 405 401 phys_addr = peer_epf_bar->phys_addr; 406 402 func_no = ntb_epc->func_no; 403 + vfunc_no = ntb_epc->vfunc_no; 407 404 408 - ret = pci_epc_map_msi_irq(epc, func_no, phys_addr, db_count, 405 + ret = pci_epc_map_msi_irq(epc, func_no, vfunc_no, phys_addr, db_count, 409 406 db_entry_size, &db_data, &db_offset); 410 407 if (ret) { 411 408 dev_err(&epc->dev, "%s intf: Failed to map MSI IRQ\n", ··· 496 491 u32 db_entry_size, msg_data; 497 492 enum pci_barno peer_barno; 498 493 phys_addr_t phys_addr; 494 + u8 func_no, vfunc_no; 499 495 struct pci_epc *epc; 500 496 size_t align; 501 497 u64 msg_addr; 502 - u8 func_no; 503 498 int ret, i; 504 499 505 500 ntb_epc = ntb->epc[type]; ··· 517 512 align = epc_features->align; 518 513 519 514 func_no = ntb_epc->func_no; 515 + vfunc_no = ntb_epc->vfunc_no; 520 516 db_entry_size = peer_ctrl->db_entry_size; 521 517 522 518 for (i = 0; i < db_count; i++) { 523 519 msg_addr = ALIGN_DOWN(msix_tbl[i].msg_addr, align); 524 520 msg_data = msix_tbl[i].msg_data; 525 - ret = pci_epc_map_addr(epc, func_no, phys_addr, msg_addr, 521 + ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys_addr, msg_addr, 526 522 db_entry_size); 527 523 if (ret) { 528 524 dev_err(&epc->dev, ··· 592 586 struct pci_epf_bar *peer_epf_bar; 593 587 enum pci_barno peer_barno; 594 588 phys_addr_t phys_addr; 589 + u8 func_no, vfunc_no; 595 590 struct pci_epc *epc; 596 - u8 func_no; 597 591 598 592 ntb_epc = ntb->epc[type]; 599 593 epc = ntb_epc->epc; ··· 603 597 peer_epf_bar = &peer_ntb_epc->epf_bar[peer_barno]; 604 598 phys_addr = 
peer_epf_bar->phys_addr; 605 599 func_no = ntb_epc->func_no; 600 + vfunc_no = ntb_epc->vfunc_no; 606 601 607 - pci_epc_unmap_addr(epc, func_no, phys_addr); 602 + pci_epc_unmap_addr(epc, func_no, vfunc_no, phys_addr); 608 603 } 609 604 610 605 /** ··· 735 728 { 736 729 struct pci_epf_bar *epf_bar; 737 730 enum pci_barno barno; 731 + u8 func_no, vfunc_no; 738 732 struct pci_epc *epc; 739 - u8 func_no; 740 733 741 734 epc = ntb_epc->epc; 742 735 func_no = ntb_epc->func_no; 736 + vfunc_no = ntb_epc->vfunc_no; 743 737 barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD]; 744 738 epf_bar = &ntb_epc->epf_bar[barno]; 745 - pci_epc_clear_bar(epc, func_no, epf_bar); 739 + pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar); 746 740 } 747 741 748 742 /** ··· 783 775 struct pci_epf_bar *peer_epf_bar, *epf_bar; 784 776 enum pci_barno peer_barno, barno; 785 777 u32 peer_spad_offset; 778 + u8 func_no, vfunc_no; 786 779 struct pci_epc *epc; 787 780 struct device *dev; 788 - u8 func_no; 789 781 int ret; 790 782 791 783 dev = &ntb->epf->dev; ··· 798 790 barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD]; 799 791 epf_bar = &ntb_epc->epf_bar[barno]; 800 792 func_no = ntb_epc->func_no; 793 + vfunc_no = ntb_epc->vfunc_no; 801 794 epc = ntb_epc->epc; 802 795 803 796 peer_spad_offset = peer_ntb_epc->reg->spad_offset; ··· 807 798 epf_bar->barno = barno; 808 799 epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_32; 809 800 810 - ret = pci_epc_set_bar(epc, func_no, epf_bar); 801 + ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar); 811 802 if (ret) { 812 803 dev_err(dev, "%s intf: peer SPAD BAR set failed\n", 813 804 pci_epc_interface_string(type)); ··· 851 842 { 852 843 struct pci_epf_bar *epf_bar; 853 844 enum pci_barno barno; 845 + u8 func_no, vfunc_no; 854 846 struct pci_epc *epc; 855 - u8 func_no; 856 847 857 848 epc = ntb_epc->epc; 858 849 func_no = ntb_epc->func_no; 850 + vfunc_no = ntb_epc->vfunc_no; 859 851 barno = ntb_epc->epf_ntb_bar[BAR_CONFIG]; 860 852 epf_bar = &ntb_epc->epf_bar[barno]; 861 - 
pci_epc_clear_bar(epc, func_no, epf_bar); 853 + pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar); 862 854 } 863 855 864 856 /** ··· 896 886 { 897 887 struct pci_epf_bar *epf_bar; 898 888 enum pci_barno barno; 889 + u8 func_no, vfunc_no; 899 890 struct epf_ntb *ntb; 900 891 struct pci_epc *epc; 901 892 struct device *dev; 902 - u8 func_no; 903 893 int ret; 904 894 905 895 ntb = ntb_epc->epf_ntb; ··· 907 897 908 898 epc = ntb_epc->epc; 909 899 func_no = ntb_epc->func_no; 900 + vfunc_no = ntb_epc->vfunc_no; 910 901 barno = ntb_epc->epf_ntb_bar[BAR_CONFIG]; 911 902 epf_bar = &ntb_epc->epf_bar[barno]; 912 903 913 - ret = pci_epc_set_bar(epc, func_no, epf_bar); 904 + ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar); 914 905 if (ret) { 915 906 dev_err(dev, "%s inft: Config/Status/SPAD BAR set failed\n", 916 907 pci_epc_interface_string(ntb_epc->type)); ··· 1225 1214 struct pci_epf_bar *epf_bar; 1226 1215 enum epf_ntb_bar bar; 1227 1216 enum pci_barno barno; 1217 + u8 func_no, vfunc_no; 1228 1218 struct pci_epc *epc; 1229 - u8 func_no; 1230 1219 1231 1220 epc = ntb_epc->epc; 1232 1221 1233 1222 func_no = ntb_epc->func_no; 1223 + vfunc_no = ntb_epc->vfunc_no; 1234 1224 1235 1225 for (bar = BAR_DB_MW1; bar < BAR_MW4; bar++) { 1236 1226 barno = ntb_epc->epf_ntb_bar[bar]; 1237 1227 epf_bar = &ntb_epc->epf_bar[barno]; 1238 - pci_epc_clear_bar(epc, func_no, epf_bar); 1228 + pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar); 1239 1229 } 1240 1230 } 1241 1231 ··· 1275 1263 const struct pci_epc_features *epc_features; 1276 1264 bool msix_capable, msi_capable; 1277 1265 struct epf_ntb_epc *ntb_epc; 1266 + u8 func_no, vfunc_no; 1278 1267 struct pci_epc *epc; 1279 1268 struct device *dev; 1280 1269 u32 db_count; 1281 - u8 func_no; 1282 1270 int ret; 1283 1271 1284 1272 ntb_epc = ntb->epc[type]; ··· 1294 1282 } 1295 1283 1296 1284 func_no = ntb_epc->func_no; 1285 + vfunc_no = ntb_epc->vfunc_no; 1297 1286 1298 1287 db_count = ntb->db_count; 1299 1288 if (db_count > 
MAX_DB_COUNT) { ··· 1306 1293 epc = ntb_epc->epc; 1307 1294 1308 1295 if (msi_capable) { 1309 - ret = pci_epc_set_msi(epc, func_no, db_count); 1296 + ret = pci_epc_set_msi(epc, func_no, vfunc_no, db_count); 1310 1297 if (ret) { 1311 1298 dev_err(dev, "%s intf: MSI configuration failed\n", 1312 1299 pci_epc_interface_string(type)); ··· 1315 1302 } 1316 1303 1317 1304 if (msix_capable) { 1318 - ret = pci_epc_set_msix(epc, func_no, db_count, 1305 + ret = pci_epc_set_msix(epc, func_no, vfunc_no, db_count, 1319 1306 ntb_epc->msix_bar, 1320 1307 ntb_epc->msix_table_offset); 1321 1308 if (ret) { ··· 1436 1423 u32 num_mws, db_count; 1437 1424 enum epf_ntb_bar bar; 1438 1425 enum pci_barno barno; 1426 + u8 func_no, vfunc_no; 1439 1427 struct pci_epc *epc; 1440 1428 struct device *dev; 1441 1429 size_t align; 1442 1430 int ret, i; 1443 - u8 func_no; 1444 1431 u64 size; 1445 1432 1446 1433 ntb_epc = ntb->epc[type]; ··· 1450 1437 epc_features = ntb_epc->epc_features; 1451 1438 align = epc_features->align; 1452 1439 func_no = ntb_epc->func_no; 1440 + vfunc_no = ntb_epc->vfunc_no; 1453 1441 epc = ntb_epc->epc; 1454 1442 num_mws = ntb->num_mws; 1455 1443 db_count = ntb->db_count; ··· 1478 1464 barno = ntb_epc->epf_ntb_bar[bar]; 1479 1465 epf_bar = &ntb_epc->epf_bar[barno]; 1480 1466 1481 - ret = pci_epc_set_bar(epc, func_no, epf_bar); 1467 + ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar); 1482 1468 if (ret) { 1483 1469 dev_err(dev, "%s intf: DoorBell BAR set failed\n", 1484 1470 pci_epc_interface_string(type)); ··· 1550 1536 const struct pci_epc_features *epc_features; 1551 1537 struct pci_epf_bar *epf_bar; 1552 1538 struct epf_ntb_epc *ntb_epc; 1539 + u8 func_no, vfunc_no; 1553 1540 struct pci_epf *epf; 1554 1541 struct device *dev; 1555 - u8 func_no; 1556 1542 1557 1543 dev = &ntb->epf->dev; 1558 1544 ··· 1561 1547 return -ENOMEM; 1562 1548 1563 1549 epf = ntb->epf; 1550 + vfunc_no = epf->vfunc_no; 1564 1551 if (type == PRIMARY_INTERFACE) { 1565 1552 func_no = 
epf->func_no; 1566 1553 epf_bar = epf->bar; ··· 1573 1558 ntb_epc->linkup = false; 1574 1559 ntb_epc->epc = epc; 1575 1560 ntb_epc->func_no = func_no; 1561 + ntb_epc->vfunc_no = vfunc_no; 1576 1562 ntb_epc->type = type; 1577 1563 ntb_epc->epf_bar = epf_bar; 1578 1564 ntb_epc->epf_ntb = ntb; 1579 1565 1580 - epc_features = pci_epc_get_features(epc, func_no); 1566 + epc_features = pci_epc_get_features(epc, func_no, vfunc_no); 1581 1567 if (!epc_features) 1582 1568 return -EINVAL; 1583 1569 ntb_epc->epc_features = epc_features; ··· 1718 1702 enum pci_epc_interface_type type) 1719 1703 { 1720 1704 struct epf_ntb_epc *ntb_epc; 1705 + u8 func_no, vfunc_no; 1721 1706 struct pci_epc *epc; 1722 1707 struct pci_epf *epf; 1723 1708 struct device *dev; 1724 - u8 func_no; 1725 1709 int ret; 1726 1710 1727 1711 ntb_epc = ntb->epc[type]; ··· 1729 1713 dev = &epf->dev; 1730 1714 epc = ntb_epc->epc; 1731 1715 func_no = ntb_epc->func_no; 1716 + vfunc_no = ntb_epc->vfunc_no; 1732 1717 1733 1718 ret = epf_ntb_config_sspad_bar_set(ntb->epc[type]); 1734 1719 if (ret) { ··· 1759 1742 goto err_db_mw_bar_init; 1760 1743 } 1761 1744 1762 - ret = pci_epc_write_header(epc, func_no, epf->header); 1763 - if (ret) { 1764 - dev_err(dev, "%s intf: Configuration header write failed\n", 1765 - pci_epc_interface_string(type)); 1766 - goto err_write_header; 1745 + if (vfunc_no <= 1) { 1746 + ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header); 1747 + if (ret) { 1748 + dev_err(dev, "%s intf: Configuration header write failed\n", 1749 + pci_epc_interface_string(type)); 1750 + goto err_write_header; 1751 + } 1767 1752 } 1768 1753 1769 1754 INIT_DELAYED_WORK(&ntb->epc[type]->cmd_handler, epf_ntb_cmd_handler);
+42 -32
drivers/pci/endpoint/functions/pci-epf-test.c
··· 247 247 goto err; 248 248 } 249 249 250 - ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr, 251 - reg->size); 250 + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, 251 + reg->src_addr, reg->size); 252 252 if (ret) { 253 253 dev_err(dev, "Failed to map source address\n"); 254 254 reg->status = STATUS_SRC_ADDR_INVALID; ··· 263 263 goto err_src_map_addr; 264 264 } 265 265 266 - ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr, 267 - reg->size); 266 + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, 267 + reg->dst_addr, reg->size); 268 268 if (ret) { 269 269 dev_err(dev, "Failed to map destination address\n"); 270 270 reg->status = STATUS_DST_ADDR_INVALID; ··· 291 291 pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); 292 292 293 293 err_map_addr: 294 - pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr); 294 + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); 295 295 296 296 err_dst_addr: 297 297 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); 298 298 299 299 err_src_map_addr: 300 - pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr); 300 + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); 301 301 302 302 err_src_addr: 303 303 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); ··· 331 331 goto err; 332 332 } 333 333 334 - ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr, 335 - reg->size); 334 + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 335 + reg->src_addr, reg->size); 336 336 if (ret) { 337 337 dev_err(dev, "Failed to map address\n"); 338 338 reg->status = STATUS_SRC_ADDR_INVALID; ··· 386 386 kfree(buf); 387 387 388 388 err_map_addr: 389 - pci_epc_unmap_addr(epc, epf->func_no, phys_addr); 389 + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 390 390 391 391 err_addr: 392 392 pci_epc_mem_free_addr(epc, phys_addr, src_addr, 
reg->size); ··· 419 419 goto err; 420 420 } 421 421 422 - ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr, 423 - reg->size); 422 + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 423 + reg->dst_addr, reg->size); 424 424 if (ret) { 425 425 dev_err(dev, "Failed to map address\n"); 426 426 reg->status = STATUS_DST_ADDR_INVALID; ··· 479 479 kfree(buf); 480 480 481 481 err_map_addr: 482 - pci_epc_unmap_addr(epc, epf->func_no, phys_addr); 482 + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 483 483 484 484 err_addr: 485 485 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); ··· 501 501 502 502 switch (irq_type) { 503 503 case IRQ_TYPE_LEGACY: 504 - pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0); 504 + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 505 + PCI_EPC_IRQ_LEGACY, 0); 505 506 break; 506 507 case IRQ_TYPE_MSI: 507 - pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq); 508 + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 509 + PCI_EPC_IRQ_MSI, irq); 508 510 break; 509 511 case IRQ_TYPE_MSIX: 510 - pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq); 512 + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 513 + PCI_EPC_IRQ_MSIX, irq); 511 514 break; 512 515 default: 513 516 dev_err(dev, "Failed to raise IRQ, unknown type\n"); ··· 545 542 546 543 if (command & COMMAND_RAISE_LEGACY_IRQ) { 547 544 reg->status = STATUS_IRQ_RAISED; 548 - pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0); 545 + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 546 + PCI_EPC_IRQ_LEGACY, 0); 549 547 goto reset_handler; 550 548 } 551 549 ··· 584 580 } 585 581 586 582 if (command & COMMAND_RAISE_MSI_IRQ) { 587 - count = pci_epc_get_msi(epc, epf->func_no); 583 + count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); 588 584 if (reg->irq_number > count || count <= 0) 589 585 goto reset_handler; 590 586 reg->status = STATUS_IRQ_RAISED; 591 - pci_epc_raise_irq(epc, 
epf->func_no, PCI_EPC_IRQ_MSI, 592 - reg->irq_number); 587 + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 588 + PCI_EPC_IRQ_MSI, reg->irq_number); 593 589 goto reset_handler; 594 590 } 595 591 596 592 if (command & COMMAND_RAISE_MSIX_IRQ) { 597 - count = pci_epc_get_msix(epc, epf->func_no); 593 + count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); 598 594 if (reg->irq_number > count || count <= 0) 599 595 goto reset_handler; 600 596 reg->status = STATUS_IRQ_RAISED; 601 - pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, 602 - reg->irq_number); 597 + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 598 + PCI_EPC_IRQ_MSIX, reg->irq_number); 603 599 goto reset_handler; 604 600 } 605 601 ··· 622 618 epf_bar = &epf->bar[bar]; 623 619 624 620 if (epf_test->reg[bar]) { 625 - pci_epc_clear_bar(epc, epf->func_no, epf_bar); 621 + pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, 622 + epf_bar); 626 623 pci_epf_free_space(epf, epf_test->reg[bar], bar, 627 624 PRIMARY_INTERFACE); 628 625 } ··· 655 650 if (!!(epc_features->reserved_bar & (1 << bar))) 656 651 continue; 657 652 658 - ret = pci_epc_set_bar(epc, epf->func_no, epf_bar); 653 + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, 654 + epf_bar); 659 655 if (ret) { 660 656 pci_epf_free_space(epf, epf_test->reg[bar], bar, 661 657 PRIMARY_INTERFACE); ··· 680 674 bool msi_capable = true; 681 675 int ret; 682 676 683 - epc_features = pci_epc_get_features(epc, epf->func_no); 677 + epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 684 678 if (epc_features) { 685 679 msix_capable = epc_features->msix_capable; 686 680 msi_capable = epc_features->msi_capable; 687 681 } 688 682 689 - ret = pci_epc_write_header(epc, epf->func_no, header); 690 - if (ret) { 691 - dev_err(dev, "Configuration header write failed\n"); 692 - return ret; 683 + if (epf->vfunc_no <= 1) { 684 + ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header); 685 + if (ret) { 686 + dev_err(dev, 
"Configuration header write failed\n"); 687 + return ret; 688 + } 693 689 } 694 690 695 691 ret = pci_epf_test_set_bar(epf); ··· 699 691 return ret; 700 692 701 693 if (msi_capable) { 702 - ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts); 694 + ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 695 + epf->msi_interrupts); 703 696 if (ret) { 704 697 dev_err(dev, "MSI configuration failed\n"); 705 698 return ret; ··· 708 699 } 709 700 710 701 if (msix_capable) { 711 - ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts, 702 + ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no, 703 + epf->msix_interrupts, 712 704 epf_test->test_reg_bar, 713 705 epf_test->msix_table_offset); 714 706 if (ret) { ··· 842 832 if (WARN_ON_ONCE(!epc)) 843 833 return -EINVAL; 844 834 845 - epc_features = pci_epc_get_features(epc, epf->func_no); 835 + epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 846 836 if (!epc_features) { 847 837 dev_err(&epf->dev, "epc_features not implemented\n"); 848 838 return -EOPNOTSUPP;
+24
drivers/pci/endpoint/pci-ep-cfs.c
··· 475 475 NULL, 476 476 }; 477 477 478 + static int pci_epf_vepf_link(struct config_item *epf_pf_item, 479 + struct config_item *epf_vf_item) 480 + { 481 + struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item); 482 + struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item); 483 + struct pci_epf *epf_pf = epf_pf_group->epf; 484 + struct pci_epf *epf_vf = epf_vf_group->epf; 485 + 486 + return pci_epf_add_vepf(epf_pf, epf_vf); 487 + } 488 + 489 + static void pci_epf_vepf_unlink(struct config_item *epf_pf_item, 490 + struct config_item *epf_vf_item) 491 + { 492 + struct pci_epf_group *epf_vf_group = to_pci_epf_group(epf_vf_item); 493 + struct pci_epf_group *epf_pf_group = to_pci_epf_group(epf_pf_item); 494 + struct pci_epf *epf_pf = epf_pf_group->epf; 495 + struct pci_epf *epf_vf = epf_vf_group->epf; 496 + 497 + pci_epf_remove_vepf(epf_pf, epf_vf); 498 + } 499 + 478 500 static void pci_epf_release(struct config_item *item) 479 501 { 480 502 struct pci_epf_group *epf_group = to_pci_epf_group(item); ··· 509 487 } 510 488 511 489 static struct configfs_item_operations pci_epf_ops = { 490 + .allow_link = pci_epf_vepf_link, 491 + .drop_link = pci_epf_vepf_unlink, 512 492 .release = pci_epf_release, 513 493 }; 514 494
+95 -39
drivers/pci/endpoint/pci-epc-core.c
··· 137 137 * @epc: the features supported by *this* EPC device will be returned 138 138 * @func_no: the features supported by the EPC device specific to the 139 139 * endpoint function with func_no will be returned 140 + * @vfunc_no: the features supported by the EPC device specific to the 141 + * virtual endpoint function with vfunc_no will be returned 140 142 * 141 143 * Invoke to get the features provided by the EPC which may be 142 144 * specific to an endpoint function. Returns pci_epc_features on success 143 145 * and NULL for any failures. 144 146 */ 145 147 const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, 146 - u8 func_no) 148 + u8 func_no, u8 vfunc_no) 147 149 { 148 150 const struct pci_epc_features *epc_features; 149 151 150 152 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 151 153 return NULL; 152 154 155 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 156 + return NULL; 157 + 153 158 if (!epc->ops->get_features) 154 159 return NULL; 155 160 156 161 mutex_lock(&epc->lock); 157 - epc_features = epc->ops->get_features(epc, func_no); 162 + epc_features = epc->ops->get_features(epc, func_no, vfunc_no); 158 163 mutex_unlock(&epc->lock); 159 164 160 165 return epc_features; ··· 210 205 /** 211 206 * pci_epc_raise_irq() - interrupt the host system 212 207 * @epc: the EPC device which has to interrupt the host 213 - * @func_no: the endpoint function number in the EPC device 208 + * @func_no: the physical endpoint function number in the EPC device 209 + * @vfunc_no: the virtual endpoint function number in the physical function 214 210 * @type: specify the type of interrupt; legacy, MSI or MSI-X 215 211 * @interrupt_num: the MSI or MSI-X interrupt number 216 212 * 217 213 * Invoke to raise an legacy, MSI or MSI-X interrupt 218 214 */ 219 - int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, 215 + int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 220 216 enum pci_epc_irq_type type, 
u16 interrupt_num) 221 217 { 222 218 int ret; ··· 225 219 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 226 220 return -EINVAL; 227 221 222 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 223 + return -EINVAL; 224 + 228 225 if (!epc->ops->raise_irq) 229 226 return 0; 230 227 231 228 mutex_lock(&epc->lock); 232 - ret = epc->ops->raise_irq(epc, func_no, type, interrupt_num); 229 + ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num); 233 230 mutex_unlock(&epc->lock); 234 231 235 232 return ret; ··· 244 235 * MSI data 245 236 * @epc: the EPC device which has the MSI capability 246 237 * @func_no: the physical endpoint function number in the EPC device 238 + * @vfunc_no: the virtual endpoint function number in the physical function 247 239 * @phys_addr: the physical address of the outbound region 248 240 * @interrupt_num: the MSI interrupt number 249 241 * @entry_size: Size of Outbound address region for each interrupt ··· 260 250 * physical address (in outbound region) of the other interface to ring 261 251 * doorbell. 
262 252 */ 263 - int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr, 264 - u8 interrupt_num, u32 entry_size, u32 *msi_data, 265 - u32 *msi_addr_offset) 253 + int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 254 + phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, 255 + u32 *msi_data, u32 *msi_addr_offset) 266 256 { 267 257 int ret; 268 258 269 259 if (IS_ERR_OR_NULL(epc)) 270 260 return -EINVAL; 271 261 262 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 263 + return -EINVAL; 264 + 272 265 if (!epc->ops->map_msi_irq) 273 266 return -EINVAL; 274 267 275 268 mutex_lock(&epc->lock); 276 - ret = epc->ops->map_msi_irq(epc, func_no, phys_addr, interrupt_num, 277 - entry_size, msi_data, msi_addr_offset); 269 + ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr, 270 + interrupt_num, entry_size, msi_data, 271 + msi_addr_offset); 278 272 mutex_unlock(&epc->lock); 279 273 280 274 return ret; ··· 288 274 /** 289 275 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated 290 276 * @epc: the EPC device to which MSI interrupts was requested 291 - * @func_no: the endpoint function number in the EPC device 277 + * @func_no: the physical endpoint function number in the EPC device 278 + * @vfunc_no: the virtual endpoint function number in the physical function 292 279 * 293 280 * Invoke to get the number of MSI interrupts allocated by the RC 294 281 */ 295 - int pci_epc_get_msi(struct pci_epc *epc, u8 func_no) 282 + int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 296 283 { 297 284 int interrupt; 298 285 299 286 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 300 287 return 0; 301 288 289 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 290 + return 0; 291 + 302 292 if (!epc->ops->get_msi) 303 293 return 0; 304 294 305 295 mutex_lock(&epc->lock); 306 - interrupt = epc->ops->get_msi(epc, func_no); 296 + interrupt = 
epc->ops->get_msi(epc, func_no, vfunc_no); 307 297 mutex_unlock(&epc->lock); 308 298 309 299 if (interrupt < 0) ··· 322 304 /** 323 305 * pci_epc_set_msi() - set the number of MSI interrupt numbers required 324 306 * @epc: the EPC device on which MSI has to be configured 325 - * @func_no: the endpoint function number in the EPC device 307 + * @func_no: the physical endpoint function number in the EPC device 308 + * @vfunc_no: the virtual endpoint function number in the physical function 326 309 * @interrupts: number of MSI interrupts required by the EPF 327 310 * 328 311 * Invoke to set the required number of MSI interrupts. 329 312 */ 330 - int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts) 313 + int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts) 331 314 { 332 315 int ret; 333 316 u8 encode_int; ··· 337 318 interrupts > 32) 338 319 return -EINVAL; 339 320 321 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 322 + return -EINVAL; 323 + 340 324 if (!epc->ops->set_msi) 341 325 return 0; 342 326 343 327 encode_int = order_base_2(interrupts); 344 328 345 329 mutex_lock(&epc->lock); 346 - ret = epc->ops->set_msi(epc, func_no, encode_int); 330 + ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int); 347 331 mutex_unlock(&epc->lock); 348 332 349 333 return ret; ··· 356 334 /** 357 335 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated 358 336 * @epc: the EPC device to which MSI-X interrupts was requested 359 - * @func_no: the endpoint function number in the EPC device 337 + * @func_no: the physical endpoint function number in the EPC device 338 + * @vfunc_no: the virtual endpoint function number in the physical function 360 339 * 361 340 * Invoke to get the number of MSI-X interrupts allocated by the RC 362 341 */ 363 - int pci_epc_get_msix(struct pci_epc *epc, u8 func_no) 342 + int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) 364 343 { 365 344 int 
interrupt; 366 345 367 346 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 368 347 return 0; 369 348 349 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 350 + return 0; 351 + 370 352 if (!epc->ops->get_msix) 371 353 return 0; 372 354 373 355 mutex_lock(&epc->lock); 374 - interrupt = epc->ops->get_msix(epc, func_no); 356 + interrupt = epc->ops->get_msix(epc, func_no, vfunc_no); 375 357 mutex_unlock(&epc->lock); 376 358 377 359 if (interrupt < 0) ··· 388 362 /** 389 363 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required 390 364 * @epc: the EPC device on which MSI-X has to be configured 391 - * @func_no: the endpoint function number in the EPC device 365 + * @func_no: the physical endpoint function number in the EPC device 366 + * @vfunc_no: the virtual endpoint function number in the physical function 392 367 * @interrupts: number of MSI-X interrupts required by the EPF 393 368 * @bir: BAR where the MSI-X table resides 394 369 * @offset: Offset pointing to the start of MSI-X table 395 370 * 396 371 * Invoke to set the required number of MSI-X interrupts. 
397 372 */ 398 - int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, 399 - enum pci_barno bir, u32 offset) 373 + int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 374 + u16 interrupts, enum pci_barno bir, u32 offset) 400 375 { 401 376 int ret; 402 377 ··· 405 378 interrupts < 1 || interrupts > 2048) 406 379 return -EINVAL; 407 380 381 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 382 + return -EINVAL; 383 + 408 384 if (!epc->ops->set_msix) 409 385 return 0; 410 386 411 387 mutex_lock(&epc->lock); 412 - ret = epc->ops->set_msix(epc, func_no, interrupts - 1, bir, offset); 388 + ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir, 389 + offset); 413 390 mutex_unlock(&epc->lock); 414 391 415 392 return ret; ··· 423 392 /** 424 393 * pci_epc_unmap_addr() - unmap CPU address from PCI address 425 394 * @epc: the EPC device on which address is allocated 426 - * @func_no: the endpoint function number in the EPC device 395 + * @func_no: the physical endpoint function number in the EPC device 396 + * @vfunc_no: the virtual endpoint function number in the physical function 427 397 * @phys_addr: physical address of the local system 428 398 * 429 399 * Invoke to unmap the CPU address from PCI address. 
430 400 */ 431 - void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, 401 + void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 432 402 phys_addr_t phys_addr) 433 403 { 434 404 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 405 + return; 406 + 407 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 435 408 return; 436 409 437 410 if (!epc->ops->unmap_addr) 438 411 return; 439 412 440 413 mutex_lock(&epc->lock); 441 - epc->ops->unmap_addr(epc, func_no, phys_addr); 414 + epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr); 442 415 mutex_unlock(&epc->lock); 443 416 } 444 417 EXPORT_SYMBOL_GPL(pci_epc_unmap_addr); ··· 450 415 /** 451 416 * pci_epc_map_addr() - map CPU address to PCI address 452 417 * @epc: the EPC device on which address is allocated 453 - * @func_no: the endpoint function number in the EPC device 418 + * @func_no: the physical endpoint function number in the EPC device 419 + * @vfunc_no: the virtual endpoint function number in the physical function 454 420 * @phys_addr: physical address of the local system 455 421 * @pci_addr: PCI address to which the physical address should be mapped 456 422 * @size: the size of the allocation 457 423 * 458 424 * Invoke to map CPU address with PCI address. 
459 425 */ 460 - int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, 426 + int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 461 427 phys_addr_t phys_addr, u64 pci_addr, size_t size) 462 428 { 463 429 int ret; ··· 466 430 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 467 431 return -EINVAL; 468 432 433 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 434 + return -EINVAL; 435 + 469 436 if (!epc->ops->map_addr) 470 437 return 0; 471 438 472 439 mutex_lock(&epc->lock); 473 - ret = epc->ops->map_addr(epc, func_no, phys_addr, pci_addr, size); 440 + ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr, 441 + size); 474 442 mutex_unlock(&epc->lock); 475 443 476 444 return ret; ··· 484 444 /** 485 445 * pci_epc_clear_bar() - reset the BAR 486 446 * @epc: the EPC device for which the BAR has to be cleared 487 - * @func_no: the endpoint function number in the EPC device 447 + * @func_no: the physical endpoint function number in the EPC device 448 + * @vfunc_no: the virtual endpoint function number in the physical function 488 449 * @epf_bar: the struct epf_bar that contains the BAR information 489 450 * 490 451 * Invoke to reset the BAR of the endpoint device. 
491 452 */ 492 - void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, 453 + void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 493 454 struct pci_epf_bar *epf_bar) 494 455 { 495 456 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || ··· 498 457 epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) 499 458 return; 500 459 460 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 461 + return; 462 + 501 463 if (!epc->ops->clear_bar) 502 464 return; 503 465 504 466 mutex_lock(&epc->lock); 505 - epc->ops->clear_bar(epc, func_no, epf_bar); 467 + epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar); 506 468 mutex_unlock(&epc->lock); 507 469 } 508 470 EXPORT_SYMBOL_GPL(pci_epc_clear_bar); ··· 513 469 /** 514 470 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space 515 471 * @epc: the EPC device on which BAR has to be configured 516 - * @func_no: the endpoint function number in the EPC device 472 + * @func_no: the physical endpoint function number in the EPC device 473 + * @vfunc_no: the virtual endpoint function number in the physical function 517 474 * @epf_bar: the struct epf_bar that contains the BAR information 518 475 * 519 476 * Invoke to configure the BAR of the endpoint device. 
520 477 */ 521 - int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, 478 + int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 522 479 struct pci_epf_bar *epf_bar) 523 480 { 524 481 int ret; ··· 534 489 !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))) 535 490 return -EINVAL; 536 491 492 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 493 + return -EINVAL; 494 + 537 495 if (!epc->ops->set_bar) 538 496 return 0; 539 497 540 498 mutex_lock(&epc->lock); 541 - ret = epc->ops->set_bar(epc, func_no, epf_bar); 499 + ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar); 542 500 mutex_unlock(&epc->lock); 543 501 544 502 return ret; ··· 551 503 /** 552 504 * pci_epc_write_header() - write standard configuration header 553 505 * @epc: the EPC device to which the configuration header should be written 554 - * @func_no: the endpoint function number in the EPC device 506 + * @func_no: the physical endpoint function number in the EPC device 507 + * @vfunc_no: the virtual endpoint function number in the physical function 555 508 * @header: standard configuration header fields 556 509 * 557 510 * Invoke to write the configuration header to the endpoint controller. Every ··· 560 511 * configuration header would be written. The callback function should write 561 512 * the header fields to this dedicated location. 
562 513 */ 563 - int pci_epc_write_header(struct pci_epc *epc, u8 func_no, 514 + int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 564 515 struct pci_epf_header *header) 565 516 { 566 517 int ret; ··· 568 519 if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 569 520 return -EINVAL; 570 521 522 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 523 + return -EINVAL; 524 + 525 + /* Only Virtual Function #1 has deviceID */ 526 + if (vfunc_no > 1) 527 + return -EINVAL; 528 + 571 529 if (!epc->ops->write_header) 572 530 return 0; 573 531 574 532 mutex_lock(&epc->lock); 575 - ret = epc->ops->write_header(epc, func_no, header); 533 + ret = epc->ops->write_header(epc, func_no, vfunc_no, header); 576 534 mutex_unlock(&epc->lock); 577 535 578 536 return ret; ··· 604 548 u32 func_no; 605 549 int ret = 0; 606 550 607 - if (IS_ERR_OR_NULL(epc)) 551 + if (IS_ERR_OR_NULL(epc) || epf->is_vf) 608 552 return -EINVAL; 609 553 610 554 if (type == PRIMARY_INTERFACE && epf->epc)
+144 -2
drivers/pci/endpoint/pci-epf-core.c
··· 62 62 */ 63 63 void pci_epf_unbind(struct pci_epf *epf) 64 64 { 65 + struct pci_epf *epf_vf; 66 + 65 67 if (!epf->driver) { 66 68 dev_WARN(&epf->dev, "epf device not bound to driver\n"); 67 69 return; 68 70 } 69 71 70 72 mutex_lock(&epf->lock); 71 - epf->driver->ops->unbind(epf); 73 + list_for_each_entry(epf_vf, &epf->pci_vepf, list) { 74 + if (epf_vf->is_bound) 75 + epf_vf->driver->ops->unbind(epf_vf); 76 + } 77 + if (epf->is_bound) 78 + epf->driver->ops->unbind(epf); 72 79 mutex_unlock(&epf->lock); 73 80 module_put(epf->driver->owner); 74 81 } ··· 90 83 */ 91 84 int pci_epf_bind(struct pci_epf *epf) 92 85 { 86 + struct device *dev = &epf->dev; 87 + struct pci_epf *epf_vf; 88 + u8 func_no, vfunc_no; 89 + struct pci_epc *epc; 93 90 int ret; 94 91 95 92 if (!epf->driver) { 96 - dev_WARN(&epf->dev, "epf device not bound to driver\n"); 93 + dev_WARN(dev, "epf device not bound to driver\n"); 97 94 return -EINVAL; 98 95 } 99 96 ··· 105 94 return -EAGAIN; 106 95 107 96 mutex_lock(&epf->lock); 97 + list_for_each_entry(epf_vf, &epf->pci_vepf, list) { 98 + vfunc_no = epf_vf->vfunc_no; 99 + 100 + if (vfunc_no < 1) { 101 + dev_err(dev, "Invalid virtual function number\n"); 102 + ret = -EINVAL; 103 + goto ret; 104 + } 105 + 106 + epc = epf->epc; 107 + func_no = epf->func_no; 108 + if (!IS_ERR_OR_NULL(epc)) { 109 + if (!epc->max_vfs) { 110 + dev_err(dev, "No support for virt function\n"); 111 + ret = -EINVAL; 112 + goto ret; 113 + } 114 + 115 + if (vfunc_no > epc->max_vfs[func_no]) { 116 + dev_err(dev, "PF%d: Exceeds max vfunc number\n", 117 + func_no); 118 + ret = -EINVAL; 119 + goto ret; 120 + } 121 + } 122 + 123 + epc = epf->sec_epc; 124 + func_no = epf->sec_epc_func_no; 125 + if (!IS_ERR_OR_NULL(epc)) { 126 + if (!epc->max_vfs) { 127 + dev_err(dev, "No support for virt function\n"); 128 + ret = -EINVAL; 129 + goto ret; 130 + } 131 + 132 + if (vfunc_no > epc->max_vfs[func_no]) { 133 + dev_err(dev, "PF%d: Exceeds max vfunc number\n", 134 + func_no); 135 + ret = -EINVAL; 
136 + goto ret; 137 + } 138 + } 139 + 140 + epf_vf->func_no = epf->func_no; 141 + epf_vf->sec_epc_func_no = epf->sec_epc_func_no; 142 + epf_vf->epc = epf->epc; 143 + epf_vf->sec_epc = epf->sec_epc; 144 + ret = epf_vf->driver->ops->bind(epf_vf); 145 + if (ret) 146 + goto ret; 147 + epf_vf->is_bound = true; 148 + } 149 + 108 150 ret = epf->driver->ops->bind(epf); 151 + if (ret) 152 + goto ret; 153 + epf->is_bound = true; 154 + 109 155 mutex_unlock(&epf->lock); 156 + return 0; 157 + 158 + ret: 159 + mutex_unlock(&epf->lock); 160 + pci_epf_unbind(epf); 110 161 111 162 return ret; 112 163 } 113 164 EXPORT_SYMBOL_GPL(pci_epf_bind); 165 + 166 + /** 167 + * pci_epf_add_vepf() - associate virtual EP function to physical EP function 168 + * @epf_pf: the physical EP function to which the virtual EP function should be 169 + * associated 170 + * @epf_vf: the virtual EP function to be added 171 + * 172 + * A physical endpoint function can be associated with multiple virtual 173 + * endpoint functions. Invoke pci_epf_add_vepf() to add a virtual PCI endpoint 174 + * function to a physical PCI endpoint function. 
 */ 176 + int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf) 177 + { 178 + u32 vfunc_no; 179 + 180 + if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf)) 181 + return -EINVAL; 182 + 183 + if (epf_pf->epc || epf_vf->epc || epf_vf->epf_pf) 184 + return -EBUSY; 185 + 186 + if (epf_pf->sec_epc || epf_vf->sec_epc) 187 + return -EBUSY; 188 + 189 + mutex_lock(&epf_pf->lock); 190 + vfunc_no = find_first_zero_bit(&epf_pf->vfunction_num_map, 191 + BITS_PER_LONG); 192 + if (vfunc_no >= BITS_PER_LONG) { 193 + mutex_unlock(&epf_pf->lock); 194 + return -EINVAL; 195 + } 196 + 197 + set_bit(vfunc_no, &epf_pf->vfunction_num_map); 198 + epf_vf->vfunc_no = vfunc_no; 199 + 200 + epf_vf->epf_pf = epf_pf; 201 + epf_vf->is_vf = true; 202 + 203 + list_add_tail(&epf_vf->list, &epf_pf->pci_vepf); 204 + mutex_unlock(&epf_pf->lock); 205 + 206 + return 0; 207 + } 208 + EXPORT_SYMBOL_GPL(pci_epf_add_vepf); 209 + 210 + /** 211 + * pci_epf_remove_vepf() - remove virtual EP function from physical EP function 212 + * @epf_pf: the physical EP function from which the virtual EP function should 213 + * be removed 214 + * @epf_vf: the virtual EP function to be removed 215 + * 216 + * Invoke to remove a virtual endpoint function from the physical endpoint 217 + * function. 218 + */ 219 + void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf) 220 + { 221 + if (IS_ERR_OR_NULL(epf_pf) || IS_ERR_OR_NULL(epf_vf)) 222 + return; 223 + 224 + mutex_lock(&epf_pf->lock); 225 + clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map); 226 + list_del(&epf_vf->list); 227 + mutex_unlock(&epf_pf->lock); 228 + } 229 + EXPORT_SYMBOL_GPL(pci_epf_remove_vepf); 114 230 115 231 /** 116 232 * pci_epf_free_space() - free the allocated PCI EPF register space ··· 454 316 kfree(epf); 455 317 return ERR_PTR(-ENOMEM); 456 318 } 319 + 320 + /* VFs are numbered starting with 1. 
So set BIT(0) by default */ 321 + epf->vfunction_num_map = 1; 322 + INIT_LIST_HEAD(&epf->pci_vepf); 457 323 458 324 dev = &epf->dev; 459 325 device_initialize(dev);
+31 -26
include/linux/pci-epc.h
··· 62 62 * @owner: the module owner containing the ops 63 63 */ 64 64 struct pci_epc_ops { 65 - int (*write_header)(struct pci_epc *epc, u8 func_no, 65 + int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 66 66 struct pci_epf_header *hdr); 67 - int (*set_bar)(struct pci_epc *epc, u8 func_no, 67 + int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 68 68 struct pci_epf_bar *epf_bar); 69 - void (*clear_bar)(struct pci_epc *epc, u8 func_no, 69 + void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 70 70 struct pci_epf_bar *epf_bar); 71 - int (*map_addr)(struct pci_epc *epc, u8 func_no, 71 + int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 72 72 phys_addr_t addr, u64 pci_addr, size_t size); 73 - void (*unmap_addr)(struct pci_epc *epc, u8 func_no, 73 + void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 74 74 phys_addr_t addr); 75 - int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); 76 - int (*get_msi)(struct pci_epc *epc, u8 func_no); 77 - int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts, 78 - enum pci_barno, u32 offset); 79 - int (*get_msix)(struct pci_epc *epc, u8 func_no); 80 - int (*raise_irq)(struct pci_epc *epc, u8 func_no, 75 + int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 76 + u8 interrupts); 77 + int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); 78 + int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 79 + u16 interrupts, enum pci_barno, u32 offset); 80 + int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); 81 + int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 81 82 enum pci_epc_irq_type type, u16 interrupt_num); 82 - int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, 83 + int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 83 84 phys_addr_t phys_addr, u8 interrupt_num, 84 85 u32 entry_size, u32 *msi_data, 85 86 u32 *msi_addr_offset); 86 87 int (*start)(struct pci_epc *epc); 87 88 void 
(*stop)(struct pci_epc *epc); 88 89 const struct pci_epc_features* (*get_features)(struct pci_epc *epc, 89 - u8 func_no); 90 + u8 func_no, u8 vfunc_no); 90 91 struct module *owner; 91 92 }; 92 93 ··· 129 128 * single window. 130 129 * @num_windows: number of windows supported by device 131 130 * @max_functions: max number of functions that can be configured in this EPC 131 + * @max_vfs: Array indicating the maximum number of virtual functions that can 132 + * be associated with each physical function 132 133 * @group: configfs group representing the PCI EPC device 133 134 * @lock: mutex to protect pci_epc ops 134 135 * @function_num_map: bitmap to manage physical function number ··· 144 141 struct pci_epc_mem *mem; 145 142 unsigned int num_windows; 146 143 u8 max_functions; 144 + u8 *max_vfs; 147 145 struct config_group *group; 148 146 /* mutex to protect against concurrent access of EP controller */ 149 147 struct mutex lock; ··· 212 208 void pci_epc_init_notify(struct pci_epc *epc); 213 209 void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, 214 210 enum pci_epc_interface_type type); 215 - int pci_epc_write_header(struct pci_epc *epc, u8 func_no, 211 + int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 216 212 struct pci_epf_header *hdr); 217 - int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, 213 + int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 218 214 struct pci_epf_bar *epf_bar); 219 - void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, 215 + void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 220 216 struct pci_epf_bar *epf_bar); 221 - int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, 217 + int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 222 218 phys_addr_t phys_addr, 223 219 u64 pci_addr, size_t size); 224 - void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, 220 + void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 225 221 phys_addr_t 
phys_addr); 226 - int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); 227 - int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); 228 - int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, 229 - enum pci_barno, u32 offset); 230 - int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); 231 - int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, 222 + int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 223 + u8 interrupts); 224 + int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no); 225 + int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 226 + u16 interrupts, enum pci_barno, u32 offset); 227 + int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no); 228 + int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 232 229 phys_addr_t phys_addr, u8 interrupt_num, 233 230 u32 entry_size, u32 *msi_data, u32 *msi_addr_offset); 234 - int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, 231 + int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 235 232 enum pci_epc_irq_type type, u16 interrupt_num); 236 233 int pci_epc_start(struct pci_epc *epc); 237 234 void pci_epc_stop(struct pci_epc *epc); 238 235 const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, 239 - u8 func_no); 236 + u8 func_no, u8 vfunc_no); 240 237 enum pci_barno 241 238 pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features); 242 239 enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+15 -1
include/linux/pci-epf.h
··· 121 121 * @bar: represents the BAR of EPF device 122 122 * @msi_interrupts: number of MSI interrupts required by this function 123 123 * @msix_interrupts: number of MSI-X interrupts required by this function 124 - * @func_no: unique function number within this endpoint device 124 + * @func_no: unique (physical) function number within this endpoint device 125 + * @vfunc_no: unique virtual function number within a physical function 125 126 * @epc: the EPC device to which this EPF device is bound 127 + * @epf_pf: the physical EPF device to which this virtual EPF device is bound 126 128 * @driver: the EPF driver to which this EPF device is bound 127 129 * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc 128 130 * @nb: notifier block to notify EPF of any EPC events (like linkup) ··· 135 133 * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC 136 134 * @sec_epc_func_no: unique (physical) function number within the secondary EPC 137 135 * @group: configfs group associated with the EPF device 136 + * @is_bound: indicates if bind notification to function driver has been invoked 137 + * @is_vf: true - virtual function, false - physical function 138 + * @vfunction_num_map: bitmap to manage virtual function number 139 + * @pci_vepf: list of virtual endpoint functions associated with this function 138 140 */ 139 141 struct pci_epf { 140 142 struct device dev; ··· 148 142 u8 msi_interrupts; 149 143 u16 msix_interrupts; 150 144 u8 func_no; 145 + u8 vfunc_no; 151 146 152 147 struct pci_epc *epc; 148 + struct pci_epf *epf_pf; 153 149 struct pci_epf_driver *driver; 154 150 struct list_head list; 155 151 struct notifier_block nb; ··· 164 156 struct pci_epf_bar sec_epc_bar[6]; 165 157 u8 sec_epc_func_no; 166 158 struct config_group *group; 159 + unsigned int is_bound; 160 + unsigned int is_vf; 161 + unsigned long vfunction_num_map; 162 + struct list_head pci_vepf; 167 163 }; 168 164 169 165 /** ··· 211 199 void pci_epf_unbind(struct 
pci_epf *epf); 212 200 struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf, 213 201 struct config_group *group); 202 + int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf); 203 + void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf); 214 204 #endif /* __LINUX_PCI_EPF_H */