Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'pci/endpoint'

- Add pci_epc_function_is_valid() to avoid repeating common validation
checks (Damien Le Moal)

- Skip attempts to allocate from endpoint controller memory window if the
requested size is larger than the window (Damien Le Moal)

- Add and document pci_epc_mem_map() and pci_epc_mem_unmap() to handle
controller-specific size and alignment constraints, and add test cases to
the endpoint test driver (Damien Le Moal)

- Implement dwc pci_epc_ops.align_addr() so pci_epc_mem_map() can observe
DWC-specific alignment requirements (Damien Le Moal)

- Synchronously cancel command handler work in endpoint test before
cleaning up DMA and BARs (Damien Le Moal)

- Respect endpoint page size in dw_pcie_ep_align_addr() (Niklas Cassel)

- Use dw_pcie_ep_align_addr() in dw_pcie_ep_raise_msi_irq() and
dw_pcie_ep_raise_msix_irq() instead of open coding the equivalent (Niklas
Cassel)

- Remove superfluous 'return' from pci_epf_test_clean_dma_chan() (Wang
Jiang)

- Avoid NULL dereference if Modem Host Interface Endpoint lacks 'mmio' DT
property (Zhongqiu Han)

- Release PCI domain ID of the Endpoint controller's parent (not the
controller itself), and do so before unregistering the controller, to avoid a
use-after-free (Zijun Hu)

- Clear secondary (not primary) EPC in pci_epc_remove_epf() when removing
the secondary controller associated with an NTB (Zijun Hu)

- Fix pci_epc_map map_size kerneldoc (Rick Wertenbroek)

* pci/endpoint:
PCI: endpoint: Fix pci_epc_map map_size kerneldoc string
PCI: endpoint: Clear secondary (not primary) EPC in pci_epc_remove_epf()
PCI: endpoint: Fix PCI domain ID release in pci_epc_destroy()
PCI: endpoint: epf-mhi: Avoid NULL dereference if DT lacks 'mmio'
PCI: endpoint: Remove surplus return statement from pci_epf_test_clean_dma_chan()
PCI: dwc: ep: Use align addr function for dw_pcie_ep_raise_{msi,msix}_irq()
PCI: endpoint: test: Synchronously cancel command handler work
PCI: dwc: endpoint: Implement the pci_epc_ops::align_addr() operation
PCI: endpoint: test: Use pci_epc_mem_map/unmap()
PCI: endpoint: Update documentation
PCI: endpoint: Introduce pci_epc_mem_map()/unmap()
PCI: endpoint: Improve pci_epc_mem_alloc_addr()
PCI: endpoint: Introduce pci_epc_function_is_valid()

+439 -251
+29
Documentation/PCI/endpoint/pci-endpoint.rst
··· 117 117 The PCI endpoint function driver should use pci_epc_mem_free_addr() to 118 118 free the memory space allocated using pci_epc_mem_alloc_addr(). 119 119 120 + * pci_epc_map_addr() 121 + 122 + A PCI endpoint function driver should use pci_epc_map_addr() to map to a RC 123 + PCI address the CPU address of local memory obtained with 124 + pci_epc_mem_alloc_addr(). 125 + 126 + * pci_epc_unmap_addr() 127 + 128 + A PCI endpoint function driver should use pci_epc_unmap_addr() to unmap the 129 + CPU address of local memory mapped to a RC address with pci_epc_map_addr(). 130 + 131 + * pci_epc_mem_map() 132 + 133 + A PCI endpoint controller may impose constraints on the RC PCI addresses that 134 + can be mapped. The function pci_epc_mem_map() allows endpoint function 135 + drivers to allocate and map controller memory while handling such 136 + constraints. This function will determine the size of the memory that must be 137 + allocated with pci_epc_mem_alloc_addr() for successfully mapping a RC PCI 138 + address range. This function will also indicate the size of the PCI address 139 + range that was actually mapped, which can be less than the requested size, as 140 + well as the offset into the allocated memory to use for accessing the mapped 141 + RC PCI address range. 142 + 143 + * pci_epc_mem_unmap() 144 + 145 + A PCI endpoint function driver can use pci_epc_mem_unmap() to unmap and free 146 + controller memory that was allocated and mapped using pci_epc_mem_map(). 147 + 148 + 120 149 Other EPC APIs 121 150 ~~~~~~~~~~~~~~ 122 151
+25 -10
drivers/pci/controller/dwc/pcie-designware-ep.c
··· 268 268 return -EINVAL; 269 269 } 270 270 271 + static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr, 272 + size_t *pci_size, size_t *offset) 273 + { 274 + struct dw_pcie_ep *ep = epc_get_drvdata(epc); 275 + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 276 + u64 mask = pci->region_align - 1; 277 + size_t ofst = pci_addr & mask; 278 + 279 + *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size); 280 + *offset = ofst; 281 + 282 + return pci_addr & ~mask; 283 + } 284 + 271 285 static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 272 286 phys_addr_t addr) 273 287 { ··· 458 444 .write_header = dw_pcie_ep_write_header, 459 445 .set_bar = dw_pcie_ep_set_bar, 460 446 .clear_bar = dw_pcie_ep_clear_bar, 447 + .align_addr = dw_pcie_ep_align_addr, 461 448 .map_addr = dw_pcie_ep_map_addr, 462 449 .unmap_addr = dw_pcie_ep_unmap_addr, 463 450 .set_msi = dw_pcie_ep_set_msi, ··· 503 488 u32 msg_addr_lower, msg_addr_upper, reg; 504 489 struct dw_pcie_ep_func *ep_func; 505 490 struct pci_epc *epc = ep->epc; 506 - unsigned int aligned_offset; 491 + size_t map_size = sizeof(u32); 492 + size_t offset; 507 493 u16 msg_ctrl, msg_data; 508 494 bool has_upper; 509 495 u64 msg_addr; ··· 532 516 } 533 517 msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower; 534 518 535 - aligned_offset = msg_addr & (epc->mem->window.page_size - 1); 536 - msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); 519 + msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); 537 520 ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, 538 - epc->mem->window.page_size); 521 + map_size); 539 522 if (ret) 540 523 return ret; 541 524 542 - writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); 525 + writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset); 543 526 544 527 dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); 545 528 ··· 589 574 struct pci_epf_msix_tbl *msix_tbl; 590 575 struct 
dw_pcie_ep_func *ep_func; 591 576 struct pci_epc *epc = ep->epc; 577 + size_t map_size = sizeof(u32); 578 + size_t offset; 592 579 u32 reg, msg_data, vec_ctrl; 593 - unsigned int aligned_offset; 594 580 u32 tbl_offset; 595 581 u64 msg_addr; 596 582 int ret; ··· 616 600 return -EPERM; 617 601 } 618 602 619 - aligned_offset = msg_addr & (epc->mem->window.page_size - 1); 620 - msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); 603 + msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset); 621 604 ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, 622 - epc->mem->window.page_size); 605 + map_size); 623 606 if (ret) 624 607 return ret; 625 608 626 - writel(msg_data, ep->msi_mem + aligned_offset); 609 + writel(msg_data, ep->msi_mem + offset); 627 610 628 611 dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys); 629 612
+6
drivers/pci/endpoint/functions/pci-epf-mhi.c
··· 867 867 { 868 868 struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf); 869 869 struct pci_epc *epc = epf->epc; 870 + struct device *dev = &epf->dev; 870 871 struct platform_device *pdev = to_platform_device(epc->dev.parent); 871 872 struct resource *res; 872 873 int ret; 873 874 874 875 /* Get MMIO base address from Endpoint controller */ 875 876 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio"); 877 + if (!res) { 878 + dev_err(dev, "Failed to get \"mmio\" resource\n"); 879 + return -ENODEV; 880 + } 881 + 876 882 epf_mhi->mmio_phys = res->start; 877 883 epf_mhi->mmio_size = resource_size(res); 878 884
+196 -184
drivers/pci/endpoint/functions/pci-epf-test.c
··· 291 291 292 292 dma_release_channel(epf_test->dma_chan_rx); 293 293 epf_test->dma_chan_rx = NULL; 294 - 295 - return; 296 294 } 297 295 298 296 static void pci_epf_test_print_rate(struct pci_epf_test *epf_test, ··· 315 317 static void pci_epf_test_copy(struct pci_epf_test *epf_test, 316 318 struct pci_epf_test_reg *reg) 317 319 { 318 - int ret; 319 - void __iomem *src_addr; 320 - void __iomem *dst_addr; 321 - phys_addr_t src_phys_addr; 322 - phys_addr_t dst_phys_addr; 320 + int ret = 0; 323 321 struct timespec64 start, end; 324 322 struct pci_epf *epf = epf_test->epf; 325 - struct device *dev = &epf->dev; 326 323 struct pci_epc *epc = epf->epc; 324 + struct device *dev = &epf->dev; 325 + struct pci_epc_map src_map, dst_map; 326 + u64 src_addr = reg->src_addr; 327 + u64 dst_addr = reg->dst_addr; 328 + size_t copy_size = reg->size; 329 + ssize_t map_size = 0; 330 + void *copy_buf = NULL, *buf; 327 331 328 - src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); 329 - if (!src_addr) { 330 - dev_err(dev, "Failed to allocate source address\n"); 331 - reg->status = STATUS_SRC_ADDR_INVALID; 332 - ret = -ENOMEM; 333 - goto err; 334 - } 335 - 336 - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, 337 - reg->src_addr, reg->size); 338 - if (ret) { 339 - dev_err(dev, "Failed to map source address\n"); 340 - reg->status = STATUS_SRC_ADDR_INVALID; 341 - goto err_src_addr; 342 - } 343 - 344 - dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); 345 - if (!dst_addr) { 346 - dev_err(dev, "Failed to allocate destination address\n"); 347 - reg->status = STATUS_DST_ADDR_INVALID; 348 - ret = -ENOMEM; 349 - goto err_src_map_addr; 350 - } 351 - 352 - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, 353 - reg->dst_addr, reg->size); 354 - if (ret) { 355 - dev_err(dev, "Failed to map destination address\n"); 356 - reg->status = STATUS_DST_ADDR_INVALID; 357 - goto err_dst_addr; 358 - } 359 - 360 - 
ktime_get_ts64(&start); 361 332 if (reg->flags & FLAG_USE_DMA) { 362 333 if (epf_test->dma_private) { 363 334 dev_err(dev, "Cannot transfer data using DMA\n"); 364 335 ret = -EINVAL; 365 - goto err_map_addr; 336 + goto set_status; 366 337 } 367 - 368 - ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 369 - src_phys_addr, reg->size, 0, 370 - DMA_MEM_TO_MEM); 371 - if (ret) 372 - dev_err(dev, "Data transfer failed\n"); 373 338 } else { 374 - void *buf; 375 - 376 - buf = kzalloc(reg->size, GFP_KERNEL); 377 - if (!buf) { 339 + copy_buf = kzalloc(copy_size, GFP_KERNEL); 340 + if (!copy_buf) { 378 341 ret = -ENOMEM; 379 - goto err_map_addr; 342 + goto set_status; 343 + } 344 + buf = copy_buf; 345 + } 346 + 347 + while (copy_size) { 348 + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, 349 + src_addr, copy_size, &src_map); 350 + if (ret) { 351 + dev_err(dev, "Failed to map source address\n"); 352 + reg->status = STATUS_SRC_ADDR_INVALID; 353 + goto free_buf; 380 354 } 381 355 382 - memcpy_fromio(buf, src_addr, reg->size); 383 - memcpy_toio(dst_addr, buf, reg->size); 384 - kfree(buf); 356 + ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, 357 + dst_addr, copy_size, &dst_map); 358 + if (ret) { 359 + dev_err(dev, "Failed to map destination address\n"); 360 + reg->status = STATUS_DST_ADDR_INVALID; 361 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, 362 + &src_map); 363 + goto free_buf; 364 + } 365 + 366 + map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size); 367 + 368 + ktime_get_ts64(&start); 369 + if (reg->flags & FLAG_USE_DMA) { 370 + ret = pci_epf_test_data_transfer(epf_test, 371 + dst_map.phys_addr, src_map.phys_addr, 372 + map_size, 0, DMA_MEM_TO_MEM); 373 + if (ret) { 374 + dev_err(dev, "Data transfer failed\n"); 375 + goto unmap; 376 + } 377 + } else { 378 + memcpy_fromio(buf, src_map.virt_addr, map_size); 379 + memcpy_toio(dst_map.virt_addr, buf, map_size); 380 + buf += map_size; 381 + } 382 + ktime_get_ts64(&end); 383 + 384 
+ copy_size -= map_size; 385 + src_addr += map_size; 386 + dst_addr += map_size; 387 + 388 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map); 389 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map); 390 + map_size = 0; 385 391 } 386 - ktime_get_ts64(&end); 387 - pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end, 388 - reg->flags & FLAG_USE_DMA); 389 392 390 - err_map_addr: 391 - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); 393 + pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, 394 + &end, reg->flags & FLAG_USE_DMA); 392 395 393 - err_dst_addr: 394 - pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); 396 + unmap: 397 + if (map_size) { 398 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map); 399 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map); 400 + } 395 401 396 - err_src_map_addr: 397 - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); 402 + free_buf: 403 + kfree(copy_buf); 398 404 399 - err_src_addr: 400 - pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); 401 - 402 - err: 405 + set_status: 403 406 if (!ret) 404 407 reg->status |= STATUS_COPY_SUCCESS; 405 408 else ··· 410 411 static void pci_epf_test_read(struct pci_epf_test *epf_test, 411 412 struct pci_epf_test_reg *reg) 412 413 { 413 - int ret; 414 - void __iomem *src_addr; 415 - void *buf; 414 + int ret = 0; 415 + void *src_buf, *buf; 416 416 u32 crc32; 417 - phys_addr_t phys_addr; 417 + struct pci_epc_map map; 418 418 phys_addr_t dst_phys_addr; 419 419 struct timespec64 start, end; 420 420 struct pci_epf *epf = epf_test->epf; 421 - struct device *dev = &epf->dev; 422 421 struct pci_epc *epc = epf->epc; 422 + struct device *dev = &epf->dev; 423 423 struct device *dma_dev = epf->epc->dev.parent; 424 + u64 src_addr = reg->src_addr; 425 + size_t src_size = reg->size; 426 + ssize_t map_size = 0; 424 427 425 - src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, 
reg->size); 426 - if (!src_addr) { 427 - dev_err(dev, "Failed to allocate address\n"); 428 - reg->status = STATUS_SRC_ADDR_INVALID; 428 + src_buf = kzalloc(src_size, GFP_KERNEL); 429 + if (!src_buf) { 429 430 ret = -ENOMEM; 430 - goto err; 431 + goto set_status; 431 432 } 433 + buf = src_buf; 432 434 433 - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 434 - reg->src_addr, reg->size); 435 - if (ret) { 436 - dev_err(dev, "Failed to map address\n"); 437 - reg->status = STATUS_SRC_ADDR_INVALID; 438 - goto err_addr; 439 - } 440 - 441 - buf = kzalloc(reg->size, GFP_KERNEL); 442 - if (!buf) { 443 - ret = -ENOMEM; 444 - goto err_map_addr; 445 - } 446 - 447 - if (reg->flags & FLAG_USE_DMA) { 448 - dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, 449 - DMA_FROM_DEVICE); 450 - if (dma_mapping_error(dma_dev, dst_phys_addr)) { 451 - dev_err(dev, "Failed to map destination buffer addr\n"); 452 - ret = -ENOMEM; 453 - goto err_dma_map; 435 + while (src_size) { 436 + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, 437 + src_addr, src_size, &map); 438 + if (ret) { 439 + dev_err(dev, "Failed to map address\n"); 440 + reg->status = STATUS_SRC_ADDR_INVALID; 441 + goto free_buf; 454 442 } 455 443 456 - ktime_get_ts64(&start); 457 - ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 458 - phys_addr, reg->size, 459 - reg->src_addr, DMA_DEV_TO_MEM); 460 - if (ret) 461 - dev_err(dev, "Data transfer failed\n"); 462 - ktime_get_ts64(&end); 444 + map_size = map.pci_size; 445 + if (reg->flags & FLAG_USE_DMA) { 446 + dst_phys_addr = dma_map_single(dma_dev, buf, map_size, 447 + DMA_FROM_DEVICE); 448 + if (dma_mapping_error(dma_dev, dst_phys_addr)) { 449 + dev_err(dev, 450 + "Failed to map destination buffer addr\n"); 451 + ret = -ENOMEM; 452 + goto unmap; 453 + } 463 454 464 - dma_unmap_single(dma_dev, dst_phys_addr, reg->size, 465 - DMA_FROM_DEVICE); 466 - } else { 467 - ktime_get_ts64(&start); 468 - memcpy_fromio(buf, src_addr, reg->size); 469 - 
ktime_get_ts64(&end); 455 + ktime_get_ts64(&start); 456 + ret = pci_epf_test_data_transfer(epf_test, 457 + dst_phys_addr, map.phys_addr, 458 + map_size, src_addr, DMA_DEV_TO_MEM); 459 + if (ret) 460 + dev_err(dev, "Data transfer failed\n"); 461 + ktime_get_ts64(&end); 462 + 463 + dma_unmap_single(dma_dev, dst_phys_addr, map_size, 464 + DMA_FROM_DEVICE); 465 + 466 + if (ret) 467 + goto unmap; 468 + } else { 469 + ktime_get_ts64(&start); 470 + memcpy_fromio(buf, map.virt_addr, map_size); 471 + ktime_get_ts64(&end); 472 + } 473 + 474 + src_size -= map_size; 475 + src_addr += map_size; 476 + buf += map_size; 477 + 478 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); 479 + map_size = 0; 470 480 } 471 481 472 - pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end, 473 - reg->flags & FLAG_USE_DMA); 482 + pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, 483 + &end, reg->flags & FLAG_USE_DMA); 474 484 475 - crc32 = crc32_le(~0, buf, reg->size); 485 + crc32 = crc32_le(~0, src_buf, reg->size); 476 486 if (crc32 != reg->checksum) 477 487 ret = -EIO; 478 488 479 - err_dma_map: 480 - kfree(buf); 489 + unmap: 490 + if (map_size) 491 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); 481 492 482 - err_map_addr: 483 - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 493 + free_buf: 494 + kfree(src_buf); 484 495 485 - err_addr: 486 - pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); 487 - 488 - err: 496 + set_status: 489 497 if (!ret) 490 498 reg->status |= STATUS_READ_SUCCESS; 491 499 else ··· 502 496 static void pci_epf_test_write(struct pci_epf_test *epf_test, 503 497 struct pci_epf_test_reg *reg) 504 498 { 505 - int ret; 506 - void __iomem *dst_addr; 507 - void *buf; 508 - phys_addr_t phys_addr; 499 + int ret = 0; 500 + void *dst_buf, *buf; 501 + struct pci_epc_map map; 509 502 phys_addr_t src_phys_addr; 510 503 struct timespec64 start, end; 511 504 struct pci_epf *epf = epf_test->epf; 512 - struct device 
*dev = &epf->dev; 513 505 struct pci_epc *epc = epf->epc; 506 + struct device *dev = &epf->dev; 514 507 struct device *dma_dev = epf->epc->dev.parent; 508 + u64 dst_addr = reg->dst_addr; 509 + size_t dst_size = reg->size; 510 + ssize_t map_size = 0; 515 511 516 - dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 517 - if (!dst_addr) { 518 - dev_err(dev, "Failed to allocate address\n"); 519 - reg->status = STATUS_DST_ADDR_INVALID; 512 + dst_buf = kzalloc(dst_size, GFP_KERNEL); 513 + if (!dst_buf) { 520 514 ret = -ENOMEM; 521 - goto err; 515 + goto set_status; 522 516 } 517 + get_random_bytes(dst_buf, dst_size); 518 + reg->checksum = crc32_le(~0, dst_buf, dst_size); 519 + buf = dst_buf; 523 520 524 - ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 525 - reg->dst_addr, reg->size); 526 - if (ret) { 527 - dev_err(dev, "Failed to map address\n"); 528 - reg->status = STATUS_DST_ADDR_INVALID; 529 - goto err_addr; 530 - } 531 - 532 - buf = kzalloc(reg->size, GFP_KERNEL); 533 - if (!buf) { 534 - ret = -ENOMEM; 535 - goto err_map_addr; 536 - } 537 - 538 - get_random_bytes(buf, reg->size); 539 - reg->checksum = crc32_le(~0, buf, reg->size); 540 - 541 - if (reg->flags & FLAG_USE_DMA) { 542 - src_phys_addr = dma_map_single(dma_dev, buf, reg->size, 543 - DMA_TO_DEVICE); 544 - if (dma_mapping_error(dma_dev, src_phys_addr)) { 545 - dev_err(dev, "Failed to map source buffer addr\n"); 546 - ret = -ENOMEM; 547 - goto err_dma_map; 521 + while (dst_size) { 522 + ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no, 523 + dst_addr, dst_size, &map); 524 + if (ret) { 525 + dev_err(dev, "Failed to map address\n"); 526 + reg->status = STATUS_DST_ADDR_INVALID; 527 + goto free_buf; 548 528 } 549 529 550 - ktime_get_ts64(&start); 530 + map_size = map.pci_size; 531 + if (reg->flags & FLAG_USE_DMA) { 532 + src_phys_addr = dma_map_single(dma_dev, buf, map_size, 533 + DMA_TO_DEVICE); 534 + if (dma_mapping_error(dma_dev, src_phys_addr)) { 535 + dev_err(dev, 536 + 
"Failed to map source buffer addr\n"); 537 + ret = -ENOMEM; 538 + goto unmap; 539 + } 551 540 552 - ret = pci_epf_test_data_transfer(epf_test, phys_addr, 553 - src_phys_addr, reg->size, 554 - reg->dst_addr, 555 - DMA_MEM_TO_DEV); 556 - if (ret) 557 - dev_err(dev, "Data transfer failed\n"); 558 - ktime_get_ts64(&end); 541 + ktime_get_ts64(&start); 559 542 560 - dma_unmap_single(dma_dev, src_phys_addr, reg->size, 561 - DMA_TO_DEVICE); 562 - } else { 563 - ktime_get_ts64(&start); 564 - memcpy_toio(dst_addr, buf, reg->size); 565 - ktime_get_ts64(&end); 543 + ret = pci_epf_test_data_transfer(epf_test, 544 + map.phys_addr, src_phys_addr, 545 + map_size, dst_addr, 546 + DMA_MEM_TO_DEV); 547 + if (ret) 548 + dev_err(dev, "Data transfer failed\n"); 549 + ktime_get_ts64(&end); 550 + 551 + dma_unmap_single(dma_dev, src_phys_addr, map_size, 552 + DMA_TO_DEVICE); 553 + 554 + if (ret) 555 + goto unmap; 556 + } else { 557 + ktime_get_ts64(&start); 558 + memcpy_toio(map.virt_addr, buf, map_size); 559 + ktime_get_ts64(&end); 560 + } 561 + 562 + dst_size -= map_size; 563 + dst_addr += map_size; 564 + buf += map_size; 565 + 566 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); 567 + map_size = 0; 566 568 } 567 569 568 - pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end, 569 - reg->flags & FLAG_USE_DMA); 570 + pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, 571 + &end, reg->flags & FLAG_USE_DMA); 570 572 571 573 /* 572 574 * wait 1ms inorder for the write to complete. 
Without this delay L3 ··· 582 568 */ 583 569 usleep_range(1000, 2000); 584 570 585 - err_dma_map: 586 - kfree(buf); 571 + unmap: 572 + if (map_size) 573 + pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map); 587 574 588 - err_map_addr: 589 - pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 575 + free_buf: 576 + kfree(dst_buf); 590 577 591 - err_addr: 592 - pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); 593 - 594 - err: 578 + set_status: 595 579 if (!ret) 596 580 reg->status |= STATUS_WRITE_SUCCESS; 597 581 else ··· 798 786 { 799 787 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 800 788 801 - cancel_delayed_work(&epf_test->cmd_handler); 789 + cancel_delayed_work_sync(&epf_test->cmd_handler); 802 790 pci_epf_test_clean_dma_chan(epf_test); 803 791 pci_epf_test_clear_bar(epf); 804 792 } ··· 929 917 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 930 918 struct pci_epc *epc = epf->epc; 931 919 932 - cancel_delayed_work(&epf_test->cmd_handler); 920 + cancel_delayed_work_sync(&epf_test->cmd_handler); 933 921 if (epc->init_complete) { 934 922 pci_epf_test_clean_dma_chan(epf_test); 935 923 pci_epf_test_clear_bar(epf);
+139 -54
drivers/pci/endpoint/pci-epc-core.c
··· 128 128 } 129 129 EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar); 130 130 131 + static bool pci_epc_function_is_valid(struct pci_epc *epc, 132 + u8 func_no, u8 vfunc_no) 133 + { 134 + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 135 + return false; 136 + 137 + if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 138 + return false; 139 + 140 + return true; 141 + } 142 + 131 143 /** 132 144 * pci_epc_get_features() - get the features supported by EPC 133 145 * @epc: the features supported by *this* EPC device will be returned ··· 157 145 { 158 146 const struct pci_epc_features *epc_features; 159 147 160 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 161 - return NULL; 162 - 163 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 148 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 164 149 return NULL; 165 150 166 151 if (!epc->ops->get_features) ··· 227 218 { 228 219 int ret; 229 220 230 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 231 - return -EINVAL; 232 - 233 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 221 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 234 222 return -EINVAL; 235 223 236 224 if (!epc->ops->raise_irq) ··· 268 262 { 269 263 int ret; 270 264 271 - if (IS_ERR_OR_NULL(epc)) 272 - return -EINVAL; 273 - 274 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 265 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 275 266 return -EINVAL; 276 267 277 268 if (!epc->ops->map_msi_irq) ··· 296 293 { 297 294 int interrupt; 298 295 299 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 300 - return 0; 301 - 302 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 296 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 303 297 return 0; 304 298 305 299 if (!epc->ops->get_msi) ··· 329 329 int ret; 330 330 u8 encode_int; 331 331 332 - if (IS_ERR_OR_NULL(epc) || 
func_no >= epc->max_functions || 333 - interrupts < 1 || interrupts > 32) 332 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 334 333 return -EINVAL; 335 334 336 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 335 + if (interrupts < 1 || interrupts > 32) 337 336 return -EINVAL; 338 337 339 338 if (!epc->ops->set_msi) ··· 360 361 { 361 362 int interrupt; 362 363 363 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 364 - return 0; 365 - 366 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 364 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 367 365 return 0; 368 366 369 367 if (!epc->ops->get_msix) ··· 393 397 { 394 398 int ret; 395 399 396 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || 397 - interrupts < 1 || interrupts > 2048) 400 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 398 401 return -EINVAL; 399 402 400 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 403 + if (interrupts < 1 || interrupts > 2048) 401 404 return -EINVAL; 402 405 403 406 if (!epc->ops->set_msix) ··· 423 428 void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 424 429 phys_addr_t phys_addr) 425 430 { 426 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 427 - return; 428 - 429 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 431 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 430 432 return; 431 433 432 434 if (!epc->ops->unmap_addr) ··· 451 459 { 452 460 int ret; 453 461 454 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 455 - return -EINVAL; 456 - 457 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 462 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 458 463 return -EINVAL; 459 464 460 465 if (!epc->ops->map_addr) ··· 467 478 EXPORT_SYMBOL_GPL(pci_epc_map_addr); 468 479 469 480 /** 481 + * pci_epc_mem_map() - allocate and map a PCI address to a 
CPU address 482 + * @epc: the EPC device on which the CPU address is to be allocated and mapped 483 + * @func_no: the physical endpoint function number in the EPC device 484 + * @vfunc_no: the virtual endpoint function number in the physical function 485 + * @pci_addr: PCI address to which the CPU address should be mapped 486 + * @pci_size: the number of bytes to map starting from @pci_addr 487 + * @map: where to return the mapping information 488 + * 489 + * Allocate a controller memory address region and map it to a RC PCI address 490 + * region, taking into account the controller physical address mapping 491 + * constraints using the controller operation align_addr(). If this operation is 492 + * not defined, we assume that there are no alignment constraints for the 493 + * mapping. 494 + * 495 + * The effective size of the PCI address range mapped from @pci_addr is 496 + * indicated by @map->pci_size. This size may be less than the requested 497 + * @pci_size. The local virtual CPU address for the mapping is indicated by 498 + * @map->virt_addr (@map->phys_addr indicates the physical address). 499 + * The size and CPU address of the controller memory allocated and mapped are 500 + * respectively indicated by @map->map_size and @map->virt_base (and 501 + * @map->phys_base for the physical address of @map->virt_base). 502 + * 503 + * Returns 0 on success and a negative error code in case of error. 504 + */ 505 + int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 506 + u64 pci_addr, size_t pci_size, struct pci_epc_map *map) 507 + { 508 + size_t map_size = pci_size; 509 + size_t map_offset = 0; 510 + int ret; 511 + 512 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 513 + return -EINVAL; 514 + 515 + if (!pci_size || !map) 516 + return -EINVAL; 517 + 518 + /* 519 + * Align the PCI address to map. If the controller defines the 520 + * .align_addr() operation, use it to determine the PCI address to map 521 + * and the size of the mapping. 
Otherwise, assume that the controller 522 + * has no alignment constraint. 523 + */ 524 + memset(map, 0, sizeof(*map)); 525 + map->pci_addr = pci_addr; 526 + if (epc->ops->align_addr) 527 + map->map_pci_addr = 528 + epc->ops->align_addr(epc, pci_addr, 529 + &map_size, &map_offset); 530 + else 531 + map->map_pci_addr = pci_addr; 532 + map->map_size = map_size; 533 + if (map->map_pci_addr + map->map_size < pci_addr + pci_size) 534 + map->pci_size = map->map_pci_addr + map->map_size - pci_addr; 535 + else 536 + map->pci_size = pci_size; 537 + 538 + map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base, 539 + map->map_size); 540 + if (!map->virt_base) 541 + return -ENOMEM; 542 + 543 + map->phys_addr = map->phys_base + map_offset; 544 + map->virt_addr = map->virt_base + map_offset; 545 + 546 + ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base, 547 + map->map_pci_addr, map->map_size); 548 + if (ret) { 549 + pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base, 550 + map->map_size); 551 + return ret; 552 + } 553 + 554 + return 0; 555 + } 556 + EXPORT_SYMBOL_GPL(pci_epc_mem_map); 557 + 558 + /** 559 + * pci_epc_mem_unmap() - unmap and free a CPU address region 560 + * @epc: the EPC device on which the CPU address is allocated and mapped 561 + * @func_no: the physical endpoint function number in the EPC device 562 + * @vfunc_no: the virtual endpoint function number in the physical function 563 + * @map: the mapping information 564 + * 565 + * Unmap and free a CPU address region that was allocated and mapped with 566 + * pci_epc_mem_map(). 
567 + */ 568 + void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 569 + struct pci_epc_map *map) 570 + { 571 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 572 + return; 573 + 574 + if (!map || !map->virt_base) 575 + return; 576 + 577 + pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base); 578 + pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base, 579 + map->map_size); 580 + } 581 + EXPORT_SYMBOL_GPL(pci_epc_mem_unmap); 582 + 583 + /** 470 584 * pci_epc_clear_bar() - reset the BAR 471 585 * @epc: the EPC device for which the BAR has to be cleared 472 586 * @func_no: the physical endpoint function number in the EPC device ··· 581 489 void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 582 490 struct pci_epf_bar *epf_bar) 583 491 { 584 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || 585 - (epf_bar->barno == BAR_5 && 586 - epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) 492 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 587 493 return; 588 494 589 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 495 + if (epf_bar->barno == BAR_5 && 496 + epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) 590 497 return; 591 498 592 499 if (!epc->ops->clear_bar) ··· 612 521 int ret; 613 522 int flags = epf_bar->flags; 614 523 615 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions || 616 - (epf_bar->barno == BAR_5 && 617 - flags & PCI_BASE_ADDRESS_MEM_TYPE_64) || 524 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 525 + return -EINVAL; 526 + 527 + if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) || 618 528 (flags & PCI_BASE_ADDRESS_SPACE_IO && 619 529 flags & PCI_BASE_ADDRESS_IO_MASK) || 620 530 (upper_32_bits(epf_bar->size) && 621 531 !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))) 622 - return -EINVAL; 623 - 624 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 625 532 return -EINVAL; 626 533 627 534 if (!epc->ops->set_bar) 
··· 650 561 { 651 562 int ret; 652 563 653 - if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) 654 - return -EINVAL; 655 - 656 - if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no])) 564 + if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) 657 565 return -EINVAL; 658 566 659 567 /* Only Virtual Function #1 has deviceID */ ··· 746 660 if (IS_ERR_OR_NULL(epc) || !epf) 747 661 return; 748 662 663 + mutex_lock(&epc->list_lock); 749 664 if (type == PRIMARY_INTERFACE) { 750 665 func_no = epf->func_no; 751 666 list = &epf->list; 667 + epf->epc = NULL; 752 668 } else { 753 669 func_no = epf->sec_epc_func_no; 754 670 list = &epf->sec_epc_list; 671 + epf->sec_epc = NULL; 755 672 } 756 - 757 - mutex_lock(&epc->list_lock); 758 673 clear_bit(func_no, &epc->function_num_map); 759 674 list_del(list); 760 - epf->epc = NULL; 761 675 mutex_unlock(&epc->list_lock); 762 676 } 763 677 EXPORT_SYMBOL_GPL(pci_epc_remove_epf); ··· 923 837 void pci_epc_destroy(struct pci_epc *epc) 924 838 { 925 839 pci_ep_cfs_remove_epc_group(epc->group); 926 - device_unregister(&epc->dev); 927 - 928 840 #ifdef CONFIG_PCI_DOMAINS_GENERIC 929 - pci_bus_release_domain_nr(&epc->dev, epc->domain_nr); 841 + pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr); 930 842 #endif 843 + device_unregister(&epc->dev); 931 844 } 932 845 EXPORT_SYMBOL_GPL(pci_epc_destroy); 933 846
+6 -3
drivers/pci/endpoint/pci-epc-mem.c
··· 178 178 void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, 179 179 phys_addr_t *phys_addr, size_t size) 180 180 { 181 - void __iomem *virt_addr = NULL; 181 + void __iomem *virt_addr; 182 182 struct pci_epc_mem *mem; 183 183 unsigned int page_shift; 184 184 size_t align_size; ··· 188 188 189 189 for (i = 0; i < epc->num_windows; i++) { 190 190 mem = epc->windows[i]; 191 - mutex_lock(&mem->lock); 191 + if (size > mem->window.size) 192 + continue; 193 + 192 194 align_size = ALIGN(size, mem->window.page_size); 193 195 order = pci_epc_mem_get_order(mem, align_size); 194 196 197 + mutex_lock(&mem->lock); 195 198 pageno = bitmap_find_free_region(mem->bitmap, mem->pages, 196 199 order); 197 200 if (pageno >= 0) { ··· 214 211 mutex_unlock(&mem->lock); 215 212 } 216 213 217 - return virt_addr; 214 + return NULL; 218 215 } 219 216 EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); 220 217
+38
include/linux/pci-epc.h
··· 33 33 } 34 34 35 35 /** 36 + * struct pci_epc_map - information about EPC memory for mapping a RC PCI 37 + * address range 38 + * @pci_addr: start address of the RC PCI address range to map 39 + * @pci_size: size of the RC PCI address range mapped from @pci_addr 40 + * @map_pci_addr: RC PCI address used as the first address mapped (may be lower 41 + * than @pci_addr) 42 + * @map_size: size of the controller memory needed for mapping the RC PCI address 43 + * range @map_pci_addr..@pci_addr+@pci_size 44 + * @phys_base: base physical address of the allocated EPC memory for mapping the 45 + * RC PCI address range 46 + * @phys_addr: physical address at which @pci_addr is mapped 47 + * @virt_base: base virtual address of the allocated EPC memory for mapping the 48 + * RC PCI address range 49 + * @virt_addr: virtual address at which @pci_addr is mapped 50 + */ 51 + struct pci_epc_map { 52 + u64 pci_addr; 53 + size_t pci_size; 54 + 55 + u64 map_pci_addr; 56 + size_t map_size; 57 + 58 + phys_addr_t phys_base; 59 + phys_addr_t phys_addr; 60 + void __iomem *virt_base; 61 + void __iomem *virt_addr; 62 + }; 63 + 64 + /** 36 65 * struct pci_epc_ops - set of function pointers for performing EPC operations 37 66 * @write_header: ops to populate configuration space header 38 67 * @set_bar: ops to configure the BAR 39 68 * @clear_bar: ops to reset the BAR 69 + * @align_addr: operation to get the mapping address, mapping size and offset 70 + * into a controller memory window needed to map an RC PCI address 71 + * region 40 72 * @map_addr: ops to map CPU address to PCI address 41 73 * @unmap_addr: ops to unmap CPU address and PCI address 42 74 * @set_msi: ops to set the requested number of MSI interrupts in the MSI ··· 93 61 struct pci_epf_bar *epf_bar); 94 62 void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 95 63 struct pci_epf_bar *epf_bar); 64 + u64 (*align_addr)(struct pci_epc *epc, u64 pci_addr, size_t *size, 65 + size_t *offset); 96 66 int (*map_addr)(struct 
pci_epc *epc, u8 func_no, u8 vfunc_no, 97 67 phys_addr_t addr, u64 pci_addr, size_t size); 98 68 void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, ··· 312 278 phys_addr_t *phys_addr, size_t size); 313 279 void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, 314 280 void __iomem *virt_addr, size_t size); 281 + int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 282 + u64 pci_addr, size_t pci_size, struct pci_epc_map *map); 283 + void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no, 284 + struct pci_epc_map *map); 315 285 316 286 #else 317 287 static inline void pci_epc_init_notify(struct pci_epc *epc)