Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

soc/fsl/qbman: Use shared-dma-pool for QMan private memory allocations

Use the shared-memory-pool mechanism for frame queue descriptor and
packed frame descriptor record area allocations.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Li Yang <leoyang.li@nxp.com>

Authored by Roy Pledge; committed by Li Yang.
Commit IDs: 07f86917 5ae783c6

Total: +63 -34
drivers/soc/fsl/qbman/qman_ccsr.c: +63 -30
··· 401 401 } 402 402 403 403 /* 404 - * Ideally we would use the DMA API to turn rmem->base into a DMA address 405 - * (especially if iommu translations ever get involved). Unfortunately, the 406 - * DMA API currently does not allow mapping anything that is not backed with 407 - * a struct page. 404 + * QMan needs two global memory areas initialized at boot time: 405 + * 1) FQD: Frame Queue Descriptors used to manage frame queues 406 + * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames 407 + * Both areas are reserved using the device tree reserved memory framework 408 + * and the addresses and sizes are initialized when the QMan device is probed 408 409 */ 409 410 static dma_addr_t fqd_a, pfdr_a; 410 411 static size_t fqd_sz, pfdr_sz; 412 + 413 + #ifdef CONFIG_PPC 414 + /* 415 + * Support for PPC Device Tree backward compatibility when compatible 416 + * string is set to fsl-qman-fqd and fsl-qman-pfdr 417 + */ 418 + static int zero_priv_mem(phys_addr_t addr, size_t sz) 419 + { 420 + /* map as cacheable, non-guarded */ 421 + void __iomem *tmpp = ioremap_prot(addr, sz, 0); 422 + 423 + if (!tmpp) 424 + return -ENOMEM; 425 + 426 + memset_io(tmpp, 0, sz); 427 + flush_dcache_range((unsigned long)tmpp, 428 + (unsigned long)tmpp + sz); 429 + iounmap(tmpp); 430 + 431 + return 0; 432 + } 411 433 412 434 static int qman_fqd(struct reserved_mem *rmem) 413 435 { ··· 437 415 fqd_sz = rmem->size; 438 416 439 417 WARN_ON(!(fqd_a && fqd_sz)); 440 - 441 418 return 0; 442 419 } 443 420 RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd); ··· 452 431 } 453 432 RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); 454 433 434 + #endif 435 + 455 436 static unsigned int qm_get_fqid_maxcnt(void) 456 437 { 457 438 return fqd_sz / 64; 458 - } 459 - 460 - /* 461 - * Flush this memory range from data cache so that QMAN originated 462 - * transactions for this memory region could be marked non-coherent. 
463 - */ 464 - static int zero_priv_mem(struct device *dev, struct device_node *node, 465 - phys_addr_t addr, size_t sz) 466 - { 467 - /* map as cacheable, non-guarded */ 468 - void __iomem *tmpp = ioremap_prot(addr, sz, 0); 469 - 470 - if (!tmpp) 471 - return -ENOMEM; 472 - 473 - memset_io(tmpp, 0, sz); 474 - flush_dcache_range((unsigned long)tmpp, 475 - (unsigned long)tmpp + sz); 476 - iounmap(tmpp); 477 - 478 - return 0; 479 439 } 480 440 481 441 static void log_edata_bits(struct device *dev, u32 bit_count) ··· 729 727 qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; 730 728 } 731 729 732 - ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); 733 - WARN_ON(ret); 734 - if (ret) 735 - return -ENODEV; 730 + if (fqd_a) { 731 + #ifdef CONFIG_PPC 732 + /* 733 + * For PPC backward DT compatibility 734 + * FQD memory MUST be zero'd by software 735 + */ 736 + zero_priv_mem(fqd_a, fqd_sz); 737 + #else 738 + WARN(1, "Unexpected architecture using non shared-dma-mem reservations"); 739 + #endif 740 + } else { 741 + /* 742 + * Order of memory regions is assumed as FQD followed by PFDR 743 + * in order to ensure allocations from the correct regions the 744 + * driver initializes then allocates each piece in order 745 + */ 746 + ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz); 747 + if (ret) { 748 + dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n", 749 + ret); 750 + return -ENODEV; 751 + } 752 + } 753 + dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz); 754 + 755 + if (!pfdr_a) { 756 + /* Setup PFDR memory */ 757 + ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz); 758 + if (ret) { 759 + dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n", 760 + ret); 761 + return -ENODEV; 762 + } 763 + } 764 + dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz); 736 765 737 766 ret = qman_init_ccsr(dev); 738 767 if (ret) {
drivers/soc/fsl/qbman/qman_priv.h: -2
··· 28 28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 29 */ 30 30 31 - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32 - 33 31 #include "dpaa_sys.h" 34 32 35 33 #include <soc/fsl/qman.h>
drivers/soc/fsl/qbman/qman_test.h: -2
··· 30 30 31 31 #include "qman_priv.h" 32 32 33 - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 - 35 33 int qman_test_stash(void); 36 34 int qman_test_api(void);