Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rapidio/tsi721: add DMA engine support

Adds support for the DMA Engine API to the Tsi721 mport driver.

Includes following changes for Tsi721 driver:
- Modifies BDMA register offset definitions to support per-channel handling
- Separates BDMA channel reserved for RIO Maintenance requests
- Adds DMA Engine callback routines

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Li Yang <leoli@freescale.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Alexandre Bounine and committed by Linus Torvalds
(commit e42d98eb, parent 9eaa3d9b).

+1050 -92
+3
drivers/rapidio/devices/Makefile
··· 3 3 # 4 4 5 5 obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o 6 + ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y) 7 + obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o 8 + endif
+134 -77
drivers/rapidio/devices/tsi721.c
··· 108 108 u16 destid, u8 hopcount, u32 offset, int len, 109 109 u32 *data, int do_wr) 110 110 { 111 + void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); 111 112 struct tsi721_dma_desc *bd_ptr; 112 113 u32 rd_count, swr_ptr, ch_stat; 113 114 int i, err = 0; ··· 117 116 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) 118 117 return -EINVAL; 119 118 120 - bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base; 119 + bd_ptr = priv->mdma.bd_base; 121 120 122 - rd_count = ioread32( 123 - priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT)); 121 + rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); 124 122 125 123 /* Initialize DMA descriptor */ 126 124 bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); ··· 134 134 mb(); 135 135 136 136 /* Start DMA operation */ 137 - iowrite32(rd_count + 2, 138 - priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); 139 - ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); 137 + iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT); 138 + ioread32(regs + TSI721_DMAC_DWRCNT); 140 139 i = 0; 141 140 142 141 /* Wait until DMA transfer is finished */ 143 - while ((ch_stat = ioread32(priv->regs + 144 - TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) { 142 + while ((ch_stat = ioread32(regs + TSI721_DMAC_STS)) 143 + & TSI721_DMAC_STS_RUN) { 145 144 udelay(1); 146 145 if (++i >= 5000000) { 147 146 dev_dbg(&priv->pdev->dev, 148 147 "%s : DMA[%d] read timeout ch_status=%x\n", 149 - __func__, TSI721_DMACH_MAINT, ch_stat); 148 + __func__, priv->mdma.ch_id, ch_stat); 150 149 if (!do_wr) 151 150 *data = 0xffffffff; 152 151 err = -EIO; ··· 161 162 __func__, ch_stat); 162 163 dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", 163 164 do_wr ? 
MAINT_WR : MAINT_RD, destid, hopcount, offset); 164 - iowrite32(TSI721_DMAC_INT_ALL, 165 - priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT)); 166 - iowrite32(TSI721_DMAC_CTL_INIT, 167 - priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT)); 165 + iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); 166 + iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); 168 167 udelay(10); 169 - iowrite32(0, priv->regs + 170 - TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); 168 + iowrite32(0, regs + TSI721_DMAC_DWRCNT); 171 169 udelay(1); 172 170 if (!do_wr) 173 171 *data = 0xffffffff; ··· 180 184 * NOTE: Skipping check and clear FIFO entries because we are waiting 181 185 * for transfer to be completed. 182 186 */ 183 - swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT)); 184 - iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT)); 187 + swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); 188 + iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); 185 189 err_out: 186 190 187 191 return err; ··· 537 541 tsi721_pw_handler(mport); 538 542 } 539 543 544 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 545 + if (dev_int & TSI721_DEV_INT_BDMA_CH) { 546 + int ch; 547 + 548 + if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { 549 + dev_dbg(&priv->pdev->dev, 550 + "IRQ from DMA channel 0x%08x\n", dev_ch_int); 551 + 552 + for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { 553 + if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) 554 + continue; 555 + tsi721_bdma_handler(&priv->bdma[ch]); 556 + } 557 + } 558 + } 559 + #endif 540 560 return IRQ_HANDLED; 541 561 } 542 562 ··· 565 553 priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); 566 554 iowrite32(TSI721_SR_CHINT_IDBQRCV, 567 555 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); 568 - iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE), 569 - priv->regs + TSI721_DEV_CHAN_INTE); 570 556 571 557 /* Enable SRIO MAC interrupts */ 572 558 iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, 573 559 priv->regs + TSI721_RIO_EM_DEV_INT_EN); 574 560 561 + /* Enable interrupts from channels in use */ 
562 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 563 + intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) | 564 + (TSI721_INT_BDMA_CHAN_M & 565 + ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT)); 566 + #else 567 + intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE); 568 + #endif 569 + iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE); 570 + 575 571 if (priv->flags & TSI721_USING_MSIX) 576 572 intr = TSI721_DEV_INT_SRIO; 577 573 else 578 574 intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | 579 - TSI721_DEV_INT_SMSG_CH; 575 + TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; 580 576 581 577 iowrite32(intr, priv->regs + TSI721_DEV_INTE); 582 578 ioread32(priv->regs + TSI721_DEV_INTE); ··· 735 715 TSI721_MSIX_OMSG_INT(i); 736 716 } 737 717 718 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 719 + /* 720 + * Initialize MSI-X entries for Block DMA Engine: 721 + * this driver supports XXX DMA channels 722 + * (one is reserved for SRIO maintenance transactions) 723 + */ 724 + for (i = 0; i < TSI721_DMA_CHNUM; i++) { 725 + entries[TSI721_VECT_DMA0_DONE + i].entry = 726 + TSI721_MSIX_DMACH_DONE(i); 727 + entries[TSI721_VECT_DMA0_INT + i].entry = 728 + TSI721_MSIX_DMACH_INT(i); 729 + } 730 + #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ 731 + 738 732 err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries)); 739 733 if (err) { 740 734 if (err > 0) 741 735 dev_info(&priv->pdev->dev, 742 736 "Only %d MSI-X vectors available, " 743 737 "not using MSI-X\n", err); 738 + else 739 + dev_err(&priv->pdev->dev, 740 + "Failed to enable MSI-X (err=%d)\n", err); 744 741 return err; 745 742 } 746 743 ··· 796 759 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s", 797 760 i, pci_name(priv->pdev)); 798 761 } 762 + 763 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 764 + for (i = 0; i < TSI721_DMA_CHNUM; i++) { 765 + priv->msix[TSI721_VECT_DMA0_DONE + i].vector = 766 + entries[TSI721_VECT_DMA0_DONE + i].vector; 767 + snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name, 768 + IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s", 769 + i, 
pci_name(priv->pdev)); 770 + 771 + priv->msix[TSI721_VECT_DMA0_INT + i].vector = 772 + entries[TSI721_VECT_DMA0_INT + i].vector; 773 + snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name, 774 + IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s", 775 + i, pci_name(priv->pdev)); 776 + } 777 + #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ 799 778 800 779 return 0; 801 780 } ··· 941 888 priv->idb_base = NULL; 942 889 } 943 890 944 - static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) 891 + /** 892 + * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel. 893 + * @priv: pointer to tsi721 private data 894 + * 895 + * Initialize BDMA channel allocated for RapidIO maintenance read/write 896 + * request generation 897 + * Returns %0 on success or %-ENOMEM on failure. 898 + */ 899 + static int tsi721_bdma_maint_init(struct tsi721_device *priv) 945 900 { 946 901 struct tsi721_dma_desc *bd_ptr; 947 902 u64 *sts_ptr; 948 903 dma_addr_t bd_phys, sts_phys; 949 904 int sts_size; 950 - int bd_num = priv->bdma[chnum].bd_num; 905 + int bd_num = 2; 906 + void __iomem *regs; 951 907 952 - dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum); 908 + dev_dbg(&priv->pdev->dev, 909 + "Init Block DMA Engine for Maintenance requests, CH%d\n", 910 + TSI721_DMACH_MAINT); 953 911 954 912 /* 955 913 * Initialize DMA channel for maintenance requests 956 914 */ 915 + 916 + priv->mdma.ch_id = TSI721_DMACH_MAINT; 917 + regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); 957 918 958 919 /* Allocate space for DMA descriptors */ 959 920 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, ··· 976 909 if (!bd_ptr) 977 910 return -ENOMEM; 978 911 979 - priv->bdma[chnum].bd_phys = bd_phys; 980 - priv->bdma[chnum].bd_base = bd_ptr; 912 + priv->mdma.bd_num = bd_num; 913 + priv->mdma.bd_phys = bd_phys; 914 + priv->mdma.bd_base = bd_ptr; 981 915 982 916 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", 983 917 bd_ptr, (unsigned long long)bd_phys); ··· 995 
927 dma_free_coherent(&priv->pdev->dev, 996 928 bd_num * sizeof(struct tsi721_dma_desc), 997 929 bd_ptr, bd_phys); 998 - priv->bdma[chnum].bd_base = NULL; 930 + priv->mdma.bd_base = NULL; 999 931 return -ENOMEM; 1000 932 } 1001 933 1002 - priv->bdma[chnum].sts_phys = sts_phys; 1003 - priv->bdma[chnum].sts_base = sts_ptr; 1004 - priv->bdma[chnum].sts_size = sts_size; 934 + priv->mdma.sts_phys = sts_phys; 935 + priv->mdma.sts_base = sts_ptr; 936 + priv->mdma.sts_size = sts_size; 1005 937 1006 938 dev_dbg(&priv->pdev->dev, 1007 939 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", ··· 1014 946 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); 1015 947 1016 948 /* Setup DMA descriptor pointers */ 1017 - iowrite32(((u64)bd_phys >> 32), 1018 - priv->regs + TSI721_DMAC_DPTRH(chnum)); 949 + iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH); 1019 950 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), 1020 - priv->regs + TSI721_DMAC_DPTRL(chnum)); 951 + regs + TSI721_DMAC_DPTRL); 1021 952 1022 953 /* Setup descriptor status FIFO */ 1023 - iowrite32(((u64)sts_phys >> 32), 1024 - priv->regs + TSI721_DMAC_DSBH(chnum)); 954 + iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH); 1025 955 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), 1026 - priv->regs + TSI721_DMAC_DSBL(chnum)); 956 + regs + TSI721_DMAC_DSBL); 1027 957 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), 1028 - priv->regs + TSI721_DMAC_DSSZ(chnum)); 958 + regs + TSI721_DMAC_DSSZ); 1029 959 1030 960 /* Clear interrupt bits */ 1031 - iowrite32(TSI721_DMAC_INT_ALL, 1032 - priv->regs + TSI721_DMAC_INT(chnum)); 961 + iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); 1033 962 1034 - ioread32(priv->regs + TSI721_DMAC_INT(chnum)); 963 + ioread32(regs + TSI721_DMAC_INT); 1035 964 1036 965 /* Toggle DMA channel initialization */ 1037 - iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum)); 1038 - ioread32(priv->regs + TSI721_DMAC_CTL(chnum)); 966 + iowrite32(TSI721_DMAC_CTL_INIT, 
regs + TSI721_DMAC_CTL); 967 + ioread32(regs + TSI721_DMAC_CTL); 1039 968 udelay(10); 1040 969 1041 970 return 0; 1042 971 } 1043 972 1044 - static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum) 973 + static int tsi721_bdma_maint_free(struct tsi721_device *priv) 1045 974 { 1046 975 u32 ch_stat; 976 + struct tsi721_bdma_maint *mdma = &priv->mdma; 977 + void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id); 1047 978 1048 - if (priv->bdma[chnum].bd_base == NULL) 979 + if (mdma->bd_base == NULL) 1049 980 return 0; 1050 981 1051 982 /* Check if DMA channel still running */ 1052 - ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum)); 983 + ch_stat = ioread32(regs + TSI721_DMAC_STS); 1053 984 if (ch_stat & TSI721_DMAC_STS_RUN) 1054 985 return -EFAULT; 1055 986 1056 987 /* Put DMA channel into init state */ 1057 - iowrite32(TSI721_DMAC_CTL_INIT, 1058 - priv->regs + TSI721_DMAC_CTL(chnum)); 988 + iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); 1059 989 1060 990 /* Free space allocated for DMA descriptors */ 1061 991 dma_free_coherent(&priv->pdev->dev, 1062 - priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc), 1063 - priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys); 1064 - priv->bdma[chnum].bd_base = NULL; 992 + mdma->bd_num * sizeof(struct tsi721_dma_desc), 993 + mdma->bd_base, mdma->bd_phys); 994 + mdma->bd_base = NULL; 1065 995 1066 996 /* Free space allocated for status FIFO */ 1067 997 dma_free_coherent(&priv->pdev->dev, 1068 - priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts), 1069 - priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys); 1070 - priv->bdma[chnum].sts_base = NULL; 998 + mdma->sts_size * sizeof(struct tsi721_dma_sts), 999 + mdma->sts_base, mdma->sts_phys); 1000 + mdma->sts_base = NULL; 1071 1001 return 0; 1072 - } 1073 - 1074 - static int tsi721_bdma_init(struct tsi721_device *priv) 1075 - { 1076 - /* Initialize BDMA channel allocated for RapidIO maintenance read/write 1077 - * request 
generation 1078 - */ 1079 - priv->bdma[TSI721_DMACH_MAINT].bd_num = 2; 1080 - if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) { 1081 - dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA" 1082 - " channel %d, aborting\n", TSI721_DMACH_MAINT); 1083 - return -ENOMEM; 1084 - } 1085 - 1086 - return 0; 1087 - } 1088 - 1089 - static void tsi721_bdma_free(struct tsi721_device *priv) 1090 - { 1091 - tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT); 1092 1002 } 1093 1003 1094 1004 /* Enable Inbound Messaging Interrupts */ ··· 2081 2035 2082 2036 /* Disable all BDMA Channel interrupts */ 2083 2037 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) 2084 - iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch)); 2038 + iowrite32(0, 2039 + priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE); 2085 2040 2086 2041 /* Disable all general BDMA interrupts */ 2087 2042 iowrite32(0, priv->regs + TSI721_BDMA_INTE); ··· 2151 2104 mport->phy_type = RIO_PHY_SERIAL; 2152 2105 mport->priv = (void *)priv; 2153 2106 mport->phys_efptr = 0x100; 2107 + priv->mport = mport; 2154 2108 2155 2109 INIT_LIST_HEAD(&mport->dbells); 2156 2110 ··· 2177 2129 if (!err) { 2178 2130 tsi721_interrupts_init(priv); 2179 2131 ops->pwenable = tsi721_pw_enable; 2180 - } else 2132 + } else { 2181 2133 dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " 2182 2134 "vector %02X err=0x%x\n", pdev->irq, err); 2135 + goto err_exit; 2136 + } 2183 2137 2138 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 2139 + tsi721_register_dma(priv); 2140 + #endif 2184 2141 /* Enable SRIO link */ 2185 2142 iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | 2186 2143 TSI721_DEVCTL_SRBOOT_CMPL, 2187 2144 priv->regs + TSI721_DEVCTL); 2188 2145 2189 2146 rio_register_mport(mport); 2190 - priv->mport = mport; 2191 2147 2192 2148 if (mport->host_deviceid >= 0) 2193 2149 iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | ··· 2201 2149 iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); 2202 2150 2203 2151 return 0; 2152 + 2153 + err_exit: 2154 + 
kfree(mport); 2155 + kfree(ops); 2156 + return err; 2204 2157 } 2205 2158 2206 2159 static int __devinit tsi721_probe(struct pci_dev *pdev, ··· 2351 2294 tsi721_init_pc2sr_mapping(priv); 2352 2295 tsi721_init_sr2pc_mapping(priv); 2353 2296 2354 - if (tsi721_bdma_init(priv)) { 2297 + if (tsi721_bdma_maint_init(priv)) { 2355 2298 dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); 2356 2299 err = -ENOMEM; 2357 2300 goto err_unmap_bars; ··· 2376 2319 err_free_consistent: 2377 2320 tsi721_doorbell_free(priv); 2378 2321 err_free_bdma: 2379 - tsi721_bdma_free(priv); 2322 + tsi721_bdma_maint_free(priv); 2380 2323 err_unmap_bars: 2381 2324 if (priv->regs) 2382 2325 iounmap(priv->regs);
+90 -15
drivers/rapidio/devices/tsi721.h
··· 167 167 #define TSI721_DEV_INTE 0x29840 168 168 #define TSI721_DEV_INT 0x29844 169 169 #define TSI721_DEV_INTSET 0x29848 170 + #define TSI721_DEV_INT_BDMA_CH 0x00002000 171 + #define TSI721_DEV_INT_BDMA_NCH 0x00001000 170 172 #define TSI721_DEV_INT_SMSG_CH 0x00000800 171 173 #define TSI721_DEV_INT_SMSG_NCH 0x00000400 172 174 #define TSI721_DEV_INT_SR2PC_CH 0x00000200 ··· 183 181 #define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) 184 182 #define TSI721_INT_OMSG_CHAN_M 0x0000ff00 185 183 #define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) 184 + #define TSI721_INT_BDMA_CHAN_M 0x000000ff 185 + #define TSI721_INT_BDMA_CHAN(x) (1 << (x)) 186 186 187 187 /* 188 188 * PC2SR block registers ··· 239 235 * x = 0..7 240 236 */ 241 237 242 - #define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000) 243 - #define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000) 238 + #define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000) 244 239 245 - #define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000) 240 + #define TSI721_DMAC_DWRCNT 0x000 241 + #define TSI721_DMAC_DRDCNT 0x004 242 + 243 + #define TSI721_DMAC_CTL 0x008 246 244 #define TSI721_DMAC_CTL_SUSP 0x00000002 247 245 #define TSI721_DMAC_CTL_INIT 0x00000001 248 246 249 - #define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000) 247 + #define TSI721_DMAC_INT 0x00c 250 248 #define TSI721_DMAC_INT_STFULL 0x00000010 251 249 #define TSI721_DMAC_INT_DONE 0x00000008 252 250 #define TSI721_DMAC_INT_SUSP 0x00000004 ··· 256 250 #define TSI721_DMAC_INT_IOFDONE 0x00000001 257 251 #define TSI721_DMAC_INT_ALL 0x0000001f 258 252 259 - #define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000) 253 + #define TSI721_DMAC_INTSET 0x010 260 254 261 - #define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000) 255 + #define TSI721_DMAC_STS 0x014 262 256 #define TSI721_DMAC_STS_ABORT 0x00400000 263 257 #define TSI721_DMAC_STS_RUN 0x00200000 264 258 #define TSI721_DMAC_STS_CS 0x001f0000 265 259 266 - #define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000) 260 + #define TSI721_DMAC_INTE 
0x018 267 261 268 - #define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000) 262 + #define TSI721_DMAC_DPTRL 0x024 269 263 #define TSI721_DMAC_DPTRL_MASK 0xffffffe0 270 264 271 - #define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000) 265 + #define TSI721_DMAC_DPTRH 0x028 272 266 273 - #define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000) 267 + #define TSI721_DMAC_DSBL 0x02c 274 268 #define TSI721_DMAC_DSBL_MASK 0xffffffc0 275 269 276 - #define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000) 270 + #define TSI721_DMAC_DSBH 0x030 277 271 278 - #define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000) 272 + #define TSI721_DMAC_DSSZ 0x034 279 273 #define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f 280 274 #define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) 281 275 282 - 283 - #define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000) 276 + #define TSI721_DMAC_DSRP 0x038 284 277 #define TSI721_DMAC_DSRP_MASK 0x0007ffff 285 278 286 - #define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000) 279 + #define TSI721_DMAC_DSWP 0x03c 287 280 #define TSI721_DMAC_DSWP_MASK 0x0007ffff 288 281 289 282 #define TSI721_BDMA_INTE 0x5f000 ··· 617 612 #define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ 618 613 #define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ 619 614 615 + #define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ 616 + 620 617 #define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) 621 618 622 619 enum tsi721_smsg_int_flag { ··· 633 626 634 627 /* Structures */ 635 628 629 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 630 + 631 + struct tsi721_tx_desc { 632 + struct dma_async_tx_descriptor txd; 633 + struct tsi721_dma_desc *hw_desc; 634 + u16 destid; 635 + /* low 64-bits of 66-bit RIO address */ 636 + u64 rio_addr; 637 + /* upper 2-bits of 66-bit RIO address */ 638 + u8 rio_addr_u; 639 + bool interrupt; 640 + struct list_head desc_node; 641 + struct list_head tx_list; 642 + }; 643 + 636 644 struct tsi721_bdma_chan { 645 + int id; 646 + void __iomem *regs; 647 + 
int bd_num; /* number of buffer descriptors */ 648 + void *bd_base; /* start of DMA descriptors */ 649 + dma_addr_t bd_phys; 650 + void *sts_base; /* start of DMA BD status FIFO */ 651 + dma_addr_t sts_phys; 652 + int sts_size; 653 + u32 sts_rdptr; 654 + u32 wr_count; 655 + u32 wr_count_next; 656 + 657 + struct dma_chan dchan; 658 + struct tsi721_tx_desc *tx_desc; 659 + spinlock_t lock; 660 + struct list_head active_list; 661 + struct list_head queue; 662 + struct list_head free_list; 663 + dma_cookie_t completed_cookie; 664 + struct tasklet_struct tasklet; 665 + }; 666 + 667 + #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ 668 + 669 + struct tsi721_bdma_maint { 670 + int ch_id; /* BDMA channel number */ 637 671 int bd_num; /* number of buffer descriptors */ 638 672 void *bd_base; /* start of DMA descriptors */ 639 673 dma_addr_t bd_phys; ··· 769 721 TSI721_VECT_IMB1_INT, 770 722 TSI721_VECT_IMB2_INT, 771 723 TSI721_VECT_IMB3_INT, 724 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 725 + TSI721_VECT_DMA0_DONE, 726 + TSI721_VECT_DMA1_DONE, 727 + TSI721_VECT_DMA2_DONE, 728 + TSI721_VECT_DMA3_DONE, 729 + TSI721_VECT_DMA4_DONE, 730 + TSI721_VECT_DMA5_DONE, 731 + TSI721_VECT_DMA6_DONE, 732 + TSI721_VECT_DMA7_DONE, 733 + TSI721_VECT_DMA0_INT, 734 + TSI721_VECT_DMA1_INT, 735 + TSI721_VECT_DMA2_INT, 736 + TSI721_VECT_DMA3_INT, 737 + TSI721_VECT_DMA4_INT, 738 + TSI721_VECT_DMA5_INT, 739 + TSI721_VECT_DMA6_INT, 740 + TSI721_VECT_DMA7_INT, 741 + #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ 772 742 TSI721_VECT_MAX 773 743 }; 774 744 ··· 820 754 u32 pw_discard_count; 821 755 822 756 /* BDMA Engine */ 757 + struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */ 758 + 759 + #ifdef CONFIG_RAPIDIO_DMA_ENGINE 823 760 struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; 761 + #endif 824 762 825 763 /* Inbound Messaging */ 826 764 int imsg_init[TSI721_IMSG_CHNUM]; ··· 834 764 int omsg_init[TSI721_OMSG_CHNUM]; 835 765 struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; 836 766 }; 767 + 768 + #ifdef 
CONFIG_RAPIDIO_DMA_ENGINE 769 + extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan); 770 + extern int __devinit tsi721_register_dma(struct tsi721_device *priv); 771 + #endif 837 772 838 773 #endif
+823
drivers/rapidio/devices/tsi721_dma.c
··· 1 + /* 2 + * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge 3 + * 4 + * Copyright 2011 Integrated Device Technology, Inc. 5 + * Alexandre Bounine <alexandre.bounine@idt.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License as published by the Free 9 + * Software Foundation; either version 2 of the License, or (at your option) 10 + * any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 59 19 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 + */ 21 + 22 + #include <linux/io.h> 23 + #include <linux/errno.h> 24 + #include <linux/init.h> 25 + #include <linux/ioport.h> 26 + #include <linux/kernel.h> 27 + #include <linux/module.h> 28 + #include <linux/pci.h> 29 + #include <linux/rio.h> 30 + #include <linux/rio_drv.h> 31 + #include <linux/dma-mapping.h> 32 + #include <linux/interrupt.h> 33 + #include <linux/kfifo.h> 34 + #include <linux/delay.h> 35 + 36 + #include "tsi721.h" 37 + 38 + static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) 39 + { 40 + return container_of(chan, struct tsi721_bdma_chan, dchan); 41 + } 42 + 43 + static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) 44 + { 45 + return container_of(ddev, struct rio_mport, dma)->priv; 46 + } 47 + 48 + static inline 49 + struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) 50 + { 51 + return container_of(txd, struct tsi721_tx_desc, txd); 52 + } 53 + 54 + static inline 55 + struct tsi721_tx_desc *tsi721_dma_first_active( 56 + struct tsi721_bdma_chan 
*bdma_chan) 57 + { 58 + return list_first_entry(&bdma_chan->active_list, 59 + struct tsi721_tx_desc, desc_node); 60 + } 61 + 62 + static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) 63 + { 64 + struct tsi721_dma_desc *bd_ptr; 65 + struct device *dev = bdma_chan->dchan.device->dev; 66 + u64 *sts_ptr; 67 + dma_addr_t bd_phys; 68 + dma_addr_t sts_phys; 69 + int sts_size; 70 + int bd_num = bdma_chan->bd_num; 71 + 72 + dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id); 73 + 74 + /* Allocate space for DMA descriptors */ 75 + bd_ptr = dma_zalloc_coherent(dev, 76 + bd_num * sizeof(struct tsi721_dma_desc), 77 + &bd_phys, GFP_KERNEL); 78 + if (!bd_ptr) 79 + return -ENOMEM; 80 + 81 + bdma_chan->bd_phys = bd_phys; 82 + bdma_chan->bd_base = bd_ptr; 83 + 84 + dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n", 85 + bd_ptr, (unsigned long long)bd_phys); 86 + 87 + /* Allocate space for descriptor status FIFO */ 88 + sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 89 + bd_num : TSI721_DMA_MINSTSSZ; 90 + sts_size = roundup_pow_of_two(sts_size); 91 + sts_ptr = dma_zalloc_coherent(dev, 92 + sts_size * sizeof(struct tsi721_dma_sts), 93 + &sts_phys, GFP_KERNEL); 94 + if (!sts_ptr) { 95 + /* Free space allocated for DMA descriptors */ 96 + dma_free_coherent(dev, 97 + bd_num * sizeof(struct tsi721_dma_desc), 98 + bd_ptr, bd_phys); 99 + bdma_chan->bd_base = NULL; 100 + return -ENOMEM; 101 + } 102 + 103 + bdma_chan->sts_phys = sts_phys; 104 + bdma_chan->sts_base = sts_ptr; 105 + bdma_chan->sts_size = sts_size; 106 + 107 + dev_dbg(dev, 108 + "desc status FIFO @ %p (phys = %llx) size=0x%x\n", 109 + sts_ptr, (unsigned long long)sts_phys, sts_size); 110 + 111 + /* Initialize DMA descriptors ring */ 112 + bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); 113 + bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & 114 + TSI721_DMAC_DPTRL_MASK); 115 + bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); 116 + 117 + /* Setup DMA descriptor pointers */ 118 + 
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	/* Read back to push the posted write out to the device */
	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

/*
 * tsi721_bdma_ch_free - release BDMA channel HW resources
 * @bdma_chan: BDMA channel to release
 *
 * Puts the channel back into init state and frees the coherent buffers
 * allocated for the descriptor ring and the descriptor status FIFO.
 * Returns 0 on success, -EFAULT if the channel is still running.
 */
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;

	/* Nothing to do if the channel was never initialized */
	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

/*
 * tsi721_bdma_interrupt_enable - enable/disable BDMA channel interrupts
 * @bdma_chan: BDMA channel
 * @enable: non-zero to enable, zero to disable
 *
 * In both directions any pending channel interrupts are cleared so a
 * stale event cannot fire (or be left latched) across the transition.
 */
static
void tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		/* Read back to flush the posted clear before enabling */
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}

}

/* Return true when the channel's RUN status bit is clear */
static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

/*
 * tsi721_bdma_handler - top-half BDMA channel interrupt handler
 * @bdma_chan: BDMA channel that raised the interrupt
 *
 * Masks further channel interrupts and defers all processing to the
 * channel tasklet (which re-enables interrupts when done).
 */
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);

	tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/*
 * Kick the hardware by advancing its descriptor write count to
 * wr_count_next (descriptors prepared since the last start).
 * Must be called with the spinlock held.
 */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	/* Equal counts mean no new descriptors were queued */
	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"tx_chan: %p, chan: %d, regs: %p\n",
		bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	/* Read back to flush the posted write and start the transfer */
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

/*
 * tsi721_desc_put - return a logical descriptor (and its chain) to the
 * free list
 * @bdma_chan: owning BDMA channel
 * @desc: descriptor to recycle (may be NULL, in which case no-op)
 *
 * Also rolls wr_count_next back to wr_count, discarding any HW
 * descriptor slots handed out for the aborted chain.
 */
static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
			    struct tsi721_tx_desc *desc)
{
	dev_dbg(bdma_chan->dchan.device->dev,
		"Put desc: %p into free list\n", desc);

	if (desc) {
		spin_lock_bh(&bdma_chan->lock);
		list_splice_init(&desc->tx_list, &bdma_chan->free_list);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->wr_count_next = bdma_chan->wr_count;
		spin_unlock_bh(&bdma_chan->lock);
	}
}

/*
 * tsi721_desc_get - take an ACKed logical descriptor from the free list
 * and bind it to the next hardware descriptor ring slot
 * @bdma_chan: owning BDMA channel
 *
 * Returns the descriptor, or NULL if no free descriptor has been ACKed.
 *
 * NOTE(review): if the free-list walk finds no ACKed entry, ret is NULL
 * but the code below still advances wr_count_next and writes through
 * tx_desc, which after an exhausted list_for_each_entry_safe() points at
 * the list head container, not a real descriptor — looks like a stray
 * write / bookkeeping leak on the exhausted path; confirm upstream.
 */
static
struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *tx_desc, *_tx_desc;
	struct tsi721_tx_desc *ret = NULL;
	int i;

	spin_lock_bh(&bdma_chan->lock);
	list_for_each_entry_safe(tx_desc, _tx_desc,
				 &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&tx_desc->txd)) {
			list_del(&tx_desc->desc_node);
			ret = tx_desc;
			break;
		}
		dev_dbg(bdma_chan->dchan.device->dev,
			"desc %p not ACKed\n", tx_desc);
	}

	/* Map wr_count_next onto a ring index; the last entry is a link
	 * descriptor that wraps the ring, so it is never handed out.
	 */
	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
	if (i == bdma_chan->bd_num - 1) {
		i = 0;
		bdma_chan->wr_count_next++; /* skip link descriptor */
	}

	bdma_chan->wr_count_next++;
	tx_desc->txd.phys = bdma_chan->bd_phys +
				i * sizeof(struct tsi721_dma_desc);
	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];

	spin_unlock_bh(&bdma_chan->lock);

	return ret;
}

/*
 * tsi721_fill_desc - fill one hardware DTYPE1 descriptor from an SG
 * element
 * @bdma_chan: owning BDMA channel
 * @desc: logical descriptor carrying destid/rio_addr/interrupt flag
 * @sg: scatterlist element supplying the local buffer address/length
 * @rtype: RapidIO transaction type (NREAD/NWRITE variants)
 * @sys_size: RapidIO system size bits for the descriptor
 *
 * Returns 0 on success, -EINVAL if the SG element exceeds the
 * per-descriptor byte-count limit.
 */
static int
tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
	struct tsi721_tx_desc *desc, struct scatterlist *sg,
	enum dma_rtype rtype, u32 sys_size)
{
	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
	u64 rio_addr;

	if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
		dev_err(bdma_chan->dchan.device->dev,
			"SG element is too large\n");
		return -EINVAL;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
		(u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
		sg_dma_len(sg));

	dev_dbg(bdma_chan->dchan.device->dev,
		"bd_ptr = %p did=%d raddr=0x%llx\n",
		bd_ptr, desc->destid, desc->rio_addr);

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
					(rtype << 19) | desc->destid);
	if (desc->interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	/* Low 2 bits of the RIO address travel in the bcount word */
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
					(sys_size << 26) | sg_dma_len(sg));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

/*
 * Retire one completed chain: recycle its descriptors onto the free
 * list, record its cookie as completed, and invoke the client callback.
 * Caller holds the channel lock; the callback is invoked under it.
 */
static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
				      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &bdma_chan->free_list);
	list_move(&desc->desc_node, &bdma_chan->free_list);
	bdma_chan->completed_cookie = txd->cookie;

	if (callback)
		callback(param);
}

/*
 * Complete every chain on the active list, restarting the hardware
 * first if more work is queued. The channel must be idle on entry
 * (BUG otherwise); caller holds the channel lock.
 */
static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!tsi721_dma_is_idle(bdma_chan));

	/* Restart the channel before completing, if work is pending */
	if (!list_empty(&bdma_chan->queue))
		tsi721_start_dma(bdma_chan);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);
}

/*
 * tsi721_clr_stat - drain the descriptor status FIFO
 * @bdma_chan: BDMA channel
 *
 * Zeroes every consumed 8-entry FIFO block and advances the FIFO read
 * pointer register so the hardware can reuse the entries.
 */
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

/*
 * Advance channel work after a completion interrupt or issue_pending:
 * either complete everything (and restart from the queue), or retire
 * the first active chain and restart the rest.
 * Caller holds the channel lock.
 */
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
	if (list_empty(&bdma_chan->active_list) ||
		list_is_singular(&bdma_chan->active_list)) {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list empty\n", __func__);
		tsi721_dma_complete_all(bdma_chan);
	} else {
		dev_dbg(bdma_chan->dchan.device->dev,
			"%s: Active_list NOT empty\n", __func__);
		tsi721_dma_chain_complete(bdma_chan,
					tsi721_dma_first_active(bdma_chan));
		tsi721_start_dma(bdma_chan);
	}
}

/*
 * Channel tasklet: bottom half of tsi721_bdma_handler. Reads and
 * clears the channel interrupt status, logs error/FIFO-full events,
 * advances completed work, then unmasks channel interrupts.
 */
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

/*
 * tsi721_tx_submit - DMA engine .tx_submit callback
 * @txd: descriptor being submitted
 *
 * Assigns the next positive cookie (wrapping back to 1 on overflow) and
 * either starts the channel immediately (active list empty) or queues
 * the chain behind the current work. Returns the assigned cookie.
 */
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&bdma_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&bdma_chan->active_list)) {
		list_add_tail(&desc->desc_node, &bdma_chan->active_list);
		tsi721_start_dma(bdma_chan);
	} else {
		list_add_tail(&desc->desc_node, &bdma_chan->queue);
	}

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

/*
 * tsi721_alloc_chan_resources - DMA engine .device_alloc_chan_resources
 * @dchan: channel to set up
 *
 * Initializes the BDMA channel hardware, allocates bd_num-1 logical
 * descriptors (one ring slot is the link descriptor), optionally hooks
 * the channel's MSI-X vectors, enables the tasklet and interrupts.
 * Returns the number of usable descriptors, or a negative errno.
 *
 * NOTE(review): on the err_out path bdma_chan->tx_desc is left pointing
 * at the freed desc array — confirm no later path reads it before
 * reallocation.
 */
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
	struct tsi721_tx_desc *desc = NULL;
	LIST_HEAD(tmp_list);
	int i;
	int rc;

	/* Already initialized: report descriptor count again */
	if (bdma_chan->bd_base)
		return bdma_chan->bd_num - 1;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan)) {
		dev_err(dchan->device->dev, "Unable to initialize data DMA"
			" channel %d, aborting\n", bdma_chan->id);
		return -ENOMEM;
	}

	/* Allocate matching number of logical descriptors */
	desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		rc = -ENOMEM;
		goto err_out;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < bdma_chan->bd_num - 1; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc[i].tx_list);
		list_add_tail(&desc[i].desc_node, &tmp_list);
	}

	spin_lock_bh(&bdma_chan->lock);
	list_splice(&tmp_list, &bdma_chan->free_list);
	bdma_chan->completed_cookie = dchan->cookie = 1;
	spin_unlock_bh(&bdma_chan->lock);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(
			priv->msix[TSI721_VECT_DMA0_DONE +
				   bdma_chan->id].vector,
			tsi721_bdma_msix, 0,
			priv->msix[TSI721_VECT_DMA0_DONE +
				   bdma_chan->id].irq_name,
			(void *)bdma_chan);

		if (rc) {
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-DONE\n", bdma_chan->id);
			goto err_out;
		}

		rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
					    bdma_chan->id].vector,
				tsi721_bdma_msix, 0,
				priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].irq_name,
				(void *)bdma_chan);

		if (rc)	{
			dev_dbg(dchan->device->dev,
				"Unable to allocate MSI-X interrupt for "
				"BDMA%d-INT\n", bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector,
				(void *)bdma_chan);
			rc = -EIO;
			goto err_out;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tasklet_enable(&bdma_chan->tasklet);
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return bdma_chan->bd_num - 1;

err_out:
	kfree(desc);
	tsi721_bdma_ch_free(bdma_chan);
	return rc;
}

/*
 * tsi721_free_chan_resources - DMA engine .device_free_chan_resources
 * @dchan: channel to tear down
 *
 * Requires that the channel has no active or queued work (BUG
 * otherwise). Disables the tasklet and interrupts, releases MSI-X
 * vectors if used, frees the HW ring and the logical descriptors.
 */
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (bdma_chan->bd_base == NULL)
		return;

	BUG_ON(!list_empty(&bdma_chan->active_list));
	BUG_ON(!list_empty(&bdma_chan->queue));

	tasklet_disable(&bdma_chan->tasklet);

	spin_lock_bh(&bdma_chan->lock);
	list_splice_init(&bdma_chan->free_list, &list);
	spin_unlock_bh(&bdma_chan->lock);

	tsi721_bdma_interrupt_enable(bdma_chan, 0);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_bdma_ch_free(bdma_chan);
	kfree(bdma_chan->tx_desc);
}

/*
 * tsi721_tx_status - DMA engine .device_tx_status
 *
 * Snapshots completed/used cookies under the channel lock and reports
 * the transaction state via dma_async_is_complete().
 */
static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_completed;
	int			ret;

	spin_lock_bh(&bdma_chan->lock);
	last_completed = bdma_chan->completed_cookie;
	last_used = dchan->cookie;
	spin_unlock_bh(&bdma_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	dev_dbg(dchan->device->dev,
		"%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
		__func__, ret, last_completed, last_used);

	return ret;
}

/*
 * tsi721_issue_pending - DMA engine .device_issue_pending
 *
 * Kicks the channel only when the hardware is idle; otherwise queued
 * work will be picked up by the completion tasklet.
 */
static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (tsi721_dma_is_idle(bdma_chan)) {
		spin_lock_bh(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock_bh(&bdma_chan->lock);
	} else
		dev_dbg(dchan->device->dev,
			"%s: DMA channel still busy\n", __func__);
}

/*
 * tsi721_prep_rio_sg - DMA engine .device_prep_slave_sg for RapidIO
 * @dchan: channel
 * @sgl/@sg_len: local scatter-gather list
 * @dir: DMA_DEV_TO_MEM (NREAD) or DMA_MEM_TO_DEV (NWRITE variants)
 * @flags: DMA_PREP_* flags; DMA_PREP_INTERRUPT set on the last element
 * @tinfo: struct rio_dma_ext with destid, RIO address and write type
 *
 * Builds one HW descriptor per SG element, chaining them onto the first
 * logical descriptor's tx_list. Returns the first descriptor's txd, or
 * NULL on error (all acquired descriptors are returned to the free
 * list).
 */
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	struct tsi721_tx_desc *first = NULL;
	struct scatterlist *sg;
	struct rio_dma_ext *rext = tinfo;
	u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
	unsigned int i;
	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
	enum dma_rtype rtype;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		int err;

		dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
		desc = tsi721_desc_get(bdma_chan);
		if (!desc) {
			dev_err(dchan->device->dev,
				"Not enough descriptors available\n");
			goto err_desc_get;
		}

		/* Request a completion interrupt only on the last element */
		if (sg_is_last(sg))
			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
		else
			desc->interrupt = false;

		desc->destid = rext->destid;
		desc->rio_addr = rio_addr;
		desc->rio_addr_u = 0;

		err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: %d\n", err);
			goto err_desc_get;
		}

		rio_addr += sg_dma_len(sg);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->desc_node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	tsi721_desc_put(bdma_chan, first);
	return NULL;
}

/*
 * tsi721_device_control - DMA engine .device_control
 *
 * Only DMA_TERMINATE_ALL is supported: suspends the channel and
 * force-completes every active and queued chain.
 */
static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&bdma_chan->lock);

	/* make sure to stop the transfer */
	iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_chain_complete(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

/*
 * tsi721_register_dma - register the Tsi721 BDMA channels with the DMA
 * engine core
 * @priv: Tsi721 device
 *
 * Sets up every BDMA channel except the one reserved for RapidIO
 * maintenance transactions (TSI721_DMACH_MAINT), installs the DMA
 * engine callbacks and registers the device. Returns 0 or a negative
 * errno from dma_async_device_register().
 */
int __devinit tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = TSI721_DMA_MAXCH;
	int err;
	struct rio_mport *mport = priv->mport;

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		/* The maintenance channel is driven directly, not via
		 * the DMA engine.
		 */
		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->bd_num = 64;
		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;

		spin_lock_init(&bdma_chan->lock);

		INIT_LIST_HEAD(&bdma_chan->active_list);
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		/* Kept disabled until alloc_chan_resources() */
		tasklet_disable(&bdma_chan->tasklet);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
	}

	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_control = tsi721_device_control;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}