Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

net: wwan: t7xx: Add control DMA interface

The Cross Layer DMA (CLDMA) Hardware Interface (HIF) enables the control
path of Host-Modem data transfers. The CLDMA HIF layer provides a common
interface to the Port Layer.
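
The Port Layer consumes this interface by registering an RX callback and
sending control skbs. A minimal usage sketch based on the API added below;
the callback body and queue number 0 are illustrative placeholders, not
taken from this patch:

  /* Illustrative Port Layer glue; port_rx_cb() and queue 0 are placeholders. */
  static int port_rx_cb(struct cldma_queue *queue, struct sk_buff *skb)
  {
          /* Hand the control message to the Port Layer, then release it. */
          dev_kfree_skb_any(skb);
          return 0;
  }

  static int port_layer_attach(struct cldma_ctrl *md_ctrl, struct sk_buff *skb)
  {
          t7xx_cldma_set_recv_skb(md_ctrl, port_rx_cb);   /* RX path callback */
          return t7xx_cldma_send_skb(md_ctrl, 0, skb);    /* TX on a control queue */
  }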

CLDMA manages 8 independent RX/TX physical channels with data flow
control in HW queues. CLDMA uses ring buffers of General Packet
Descriptors (GPD) for TX/RX. A GPD can describe a single data buffer
(DB) or multiple DBs.
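
For reference, this is the GPD layout added in t7xx_hif_cldma.h below; each
descriptor links to the next GPD in the ring and carries the DB address and
length (comments added here for illustration):

  struct cldma_gpd {
          u8 flags;                     /* GPD_FLAGS_HWO (HW ownership), GPD_FLAGS_IOC */
          u8 not_used1;
          __le16 rx_data_allow_len;     /* RX: max DB size the HW may fill */
          __le32 next_gpd_ptr_h;        /* Link to the next GPD in the ring */
          __le32 next_gpd_ptr_l;
          __le32 data_buff_bd_ptr_h;    /* DMA address of the data buffer (DB) */
          __le32 data_buff_bd_ptr_l;
          __le16 data_buff_len;         /* Actual DB length */
          __le16 not_used2;
  };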

CLDMA HIF initializes GPD rings, registers ISR handlers for CLDMA
interrupts, and initializes CLDMA HW registers.
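
As an illustration of the ring setup, the following is a condensed sketch of
what t7xx_cldma_rx_ring_init() in t7xx_hif_cldma.c below does; error
unwinding is omitted for brevity:

  /* Condensed sketch of RX GPD ring setup (see t7xx_cldma_rx_ring_init()). */
  static int rx_ring_init_sketch(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
  {
          struct cldma_request *req;
          struct cldma_gpd *gpd;
          int i;

          INIT_LIST_HEAD(&ring->gpd_ring);
          ring->length = MAX_RX_BUDGET;

          for (i = 0; i < ring->length; i++) {
                  req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size); /* GPD + mapped skb */
                  if (!req)
                          return -ENOMEM;

                  gpd = req->gpd;
                  t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);   /* DB address */
                  gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
                  gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;           /* HW owns the GPD */
                  list_add_tail(&req->entry, &ring->gpd_ring);
          }

          /* Close the ring: each GPD's next pointer references the following GPD. */
          list_for_each_entry(req, &ring->gpd_ring, entry) {
                  t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
                  gpd = req->gpd;
          }

          return 0;
  }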

CLDMA TX flow:
1. Port Layer writes data
2. Get the DB address
3. Configure the GPD
4. Trigger processing via a HW register write (see the sketch below)
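
A condensed sketch of how these steps map onto t7xx_cldma_send_skb(),
t7xx_cldma_gpd_handle_tx_request() and t7xx_cldma_hw_start_send() below;
locking, budget accounting and error handling are omitted:

  /* Condensed TX sketch; not the full t7xx_cldma_send_skb() logic. */
  static int tx_flow_sketch(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
  {
          /* 1. Port Layer write: the caller hands over an skb for queue qno. */
          struct cldma_queue *queue = &md_ctrl->txq[qno];
          struct cldma_request *tx_req = queue->tx_next;
          struct cldma_gpd *gpd = tx_req->gpd;

          /* 2. Get the DB address: map the skb data for device access. */
          tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len,
                                               DMA_TO_DEVICE);

          /* 3. Configure the GPD: DB address, length and HW ownership. */
          t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
          gpd->data_buff_len = cpu_to_le16(skb->len);
          gpd->flags |= GPD_FLAGS_HWO;
          tx_req->skb = skb;
          queue->tx_next = list_next_entry_circular(tx_req, &queue->tr_ring->gpd_ring, entry);

          /* 4. Trigger processing via HW register write. */
          t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
          return 0;
  }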

CLDMA RX flow:
1. CLDMA HW sends an RX "done" interrupt to the host
2. Driver starts a thread to safely read the GPD
3. DB is sent to the Port Layer
4. A new buffer is created for the GPD ring (see the sketch below)
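
A condensed sketch of the per-GPD step performed from the RX work item
(t7xx_cldma_gpd_rx_from_q() below); HWO polling, flow control and error
handling are omitted:

  /* Condensed RX sketch; not the full t7xx_cldma_gpd_rx_from_q() logic. */
  static int rx_flow_sketch(struct cldma_queue *queue)
  {
          struct cldma_ctrl *md_ctrl = queue->md_ctrl;
          struct cldma_request *req = queue->tr_done;
          struct cldma_gpd *gpd = req->gpd;
          struct sk_buff *skb = req->skb;

          if (gpd->flags & GPD_FLAGS_HWO)                /* HW still owns this GPD */
                  return -EAGAIN;

          /* 3. DB is sent to the Port Layer via the registered callback. */
          dma_unmap_single(md_ctrl->dev, req->mapped_buff, skb_data_area_size(skb),
                           DMA_FROM_DEVICE);
          skb_put(skb, le16_to_cpu(gpd->data_buff_len));
          md_ctrl->recv_skb(queue, skb);
          req->skb = NULL;
          queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);

          /* 4. Create a new buffer for the GPD ring and hand it back to HW. */
          req = queue->rx_refill;
          t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
          t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
          req->gpd->data_buff_len = 0;
          req->gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
          queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);

          return 0;
  }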

Note: This patch does not enable compilation since it has dependencies
such as t7xx_pcie_mac_clear_int()/t7xx_pcie_mac_set_int() and
struct t7xx_pci_dev which are added by the core patch.

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Haijun Liu, committed by David S. Miller · 39d43904 a4ff3653

5 files changed, 1812 insertions(+)

+281 drivers/net/wwan/t7xx/t7xx_cldma.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Haijun Liu <haijun.liu@mediatek.com> 8 + * Moises Veleta <moises.veleta@intel.com> 9 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 10 + * 11 + * Contributors: 12 + * Amir Hanania <amir.hanania@intel.com> 13 + * Andy Shevchenko <andriy.shevchenko@linux.intel.com> 14 + * Eliot Lee <eliot.lee@intel.com> 15 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 16 + */ 17 + 18 + #include <linux/bits.h> 19 + #include <linux/delay.h> 20 + #include <linux/io.h> 21 + #include <linux/io-64-nonatomic-lo-hi.h> 22 + #include <linux/types.h> 23 + 24 + #include "t7xx_cldma.h" 25 + 26 + #define ADDR_SIZE 8 27 + 28 + void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info) 29 + { 30 + u32 val; 31 + 32 + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); 33 + val |= IP_BUSY_WAKEUP; 34 + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); 35 + } 36 + 37 + /** 38 + * t7xx_cldma_hw_restore() - Restore CLDMA HW registers. 39 + * @hw_info: Pointer to struct t7xx_cldma_hw. 40 + * 41 + * Restore HW after resume. Writes uplink configuration for CLDMA HW. 42 + */ 43 + void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info) 44 + { 45 + u32 ul_cfg; 46 + 47 + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); 48 + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; 49 + 50 + if (hw_info->hw_mode == MODE_BIT_64) 51 + ul_cfg |= UL_CFG_BIT_MODE_64; 52 + else if (hw_info->hw_mode == MODE_BIT_40) 53 + ul_cfg |= UL_CFG_BIT_MODE_40; 54 + else if (hw_info->hw_mode == MODE_BIT_36) 55 + ul_cfg |= UL_CFG_BIT_MODE_36; 56 + 57 + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); 58 + /* Disable TX and RX invalid address check */ 59 + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); 60 + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); 61 + } 62 + 63 + void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, 64 + enum mtk_txrx tx_rx) 65 + { 66 + void __iomem *reg; 67 + u32 val; 68 + 69 + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD : 70 + hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD; 71 + val = qno == CLDMA_ALL_Q ? 
CLDMA_ALL_Q : BIT(qno); 72 + iowrite32(val, reg); 73 + } 74 + 75 + void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info) 76 + { 77 + /* Enable the TX & RX interrupts */ 78 + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); 79 + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); 80 + /* Enable the empty queue interrupt */ 81 + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); 82 + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); 83 + } 84 + 85 + void t7xx_cldma_hw_reset(void __iomem *ao_base) 86 + { 87 + u32 val; 88 + 89 + val = ioread32(ao_base + REG_INFRA_RST2_SET); 90 + val |= RST2_PMIC_SW_RST_SET; 91 + iowrite32(val, ao_base + REG_INFRA_RST2_SET); 92 + val = ioread32(ao_base + REG_INFRA_RST4_SET); 93 + val |= RST4_CLDMA1_SW_RST_SET; 94 + iowrite32(val, ao_base + REG_INFRA_RST4_SET); 95 + udelay(1); 96 + 97 + val = ioread32(ao_base + REG_INFRA_RST4_CLR); 98 + val |= RST4_CLDMA1_SW_RST_CLR; 99 + iowrite32(val, ao_base + REG_INFRA_RST4_CLR); 100 + val = ioread32(ao_base + REG_INFRA_RST2_CLR); 101 + val |= RST2_PMIC_SW_RST_CLR; 102 + iowrite32(val, ao_base + REG_INFRA_RST2_CLR); 103 + } 104 + 105 + bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno) 106 + { 107 + u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE; 108 + 109 + return ioread64(hw_info->ap_pdn_base + offset); 110 + } 111 + 112 + void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address, 113 + enum mtk_txrx tx_rx) 114 + { 115 + u32 offset = qno * ADDR_SIZE; 116 + void __iomem *reg; 117 + 118 + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 : 119 + hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0; 120 + iowrite64(address, reg + offset); 121 + } 122 + 123 + void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, 124 + enum mtk_txrx tx_rx) 125 + { 126 + void __iomem *base = hw_info->ap_pdn_base; 127 + 128 + if (tx_rx == MTK_RX) 129 + iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD); 130 + else 131 + iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD); 132 + } 133 + 134 + unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, 135 + enum mtk_txrx tx_rx) 136 + { 137 + void __iomem *reg; 138 + u32 mask, val; 139 + 140 + mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); 141 + reg = tx_rx == MTK_RX ? 
hw_info->ap_ao_base + REG_CLDMA_DL_STATUS : 142 + hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS; 143 + val = ioread32(reg); 144 + 145 + return val & mask; 146 + } 147 + 148 + void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) 149 + { 150 + unsigned int ch_id; 151 + 152 + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); 153 + ch_id &= bitmask; 154 + /* Clear the ch IDs in the TX interrupt status register */ 155 + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); 156 + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); 157 + } 158 + 159 + void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) 160 + { 161 + unsigned int ch_id; 162 + 163 + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); 164 + ch_id &= bitmask; 165 + /* Clear the ch IDs in the RX interrupt status register */ 166 + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); 167 + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); 168 + } 169 + 170 + unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, 171 + enum mtk_txrx tx_rx) 172 + { 173 + void __iomem *reg; 174 + u32 val; 175 + 176 + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 : 177 + hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0; 178 + val = ioread32(reg); 179 + return val & bitmask; 180 + } 181 + 182 + void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, 183 + enum mtk_txrx tx_rx) 184 + { 185 + void __iomem *reg; 186 + u32 val; 187 + 188 + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : 189 + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; 190 + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); 191 + iowrite32(val, reg); 192 + } 193 + 194 + void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) 195 + { 196 + void __iomem *reg; 197 + u32 val; 198 + 199 + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : 200 + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; 201 + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); 202 + iowrite32(val << EQ_STA_BIT_OFFSET, reg); 203 + } 204 + 205 + void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, 206 + enum mtk_txrx tx_rx) 207 + { 208 + void __iomem *reg; 209 + u32 val; 210 + 211 + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : 212 + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; 213 + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); 214 + iowrite32(val, reg); 215 + } 216 + 217 + void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) 218 + { 219 + void __iomem *reg; 220 + u32 val; 221 + 222 + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : 223 + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; 224 + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); 225 + iowrite32(val << EQ_STA_BIT_OFFSET, reg); 226 + } 227 + 228 + /** 229 + * t7xx_cldma_hw_init() - Initialize CLDMA HW. 230 + * @hw_info: Pointer to struct t7xx_cldma_hw. 231 + * 232 + * Write uplink and downlink configuration to CLDMA HW. 
233 + */ 234 + void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info) 235 + { 236 + u32 ul_cfg, dl_cfg; 237 + 238 + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); 239 + dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG); 240 + /* Configure the DRAM address mode */ 241 + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; 242 + dl_cfg &= ~DL_CFG_BIT_MODE_MASK; 243 + 244 + if (hw_info->hw_mode == MODE_BIT_64) { 245 + ul_cfg |= UL_CFG_BIT_MODE_64; 246 + dl_cfg |= DL_CFG_BIT_MODE_64; 247 + } else if (hw_info->hw_mode == MODE_BIT_40) { 248 + ul_cfg |= UL_CFG_BIT_MODE_40; 249 + dl_cfg |= DL_CFG_BIT_MODE_40; 250 + } else if (hw_info->hw_mode == MODE_BIT_36) { 251 + ul_cfg |= UL_CFG_BIT_MODE_36; 252 + dl_cfg |= DL_CFG_BIT_MODE_36; 253 + } 254 + 255 + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); 256 + dl_cfg |= DL_CFG_UP_HW_LAST; 257 + iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG); 258 + iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK); 259 + iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK); 260 + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); 261 + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); 262 + } 263 + 264 + void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) 265 + { 266 + void __iomem *reg; 267 + 268 + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD : 269 + hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD; 270 + iowrite32(CLDMA_ALL_Q, reg); 271 + } 272 + 273 + void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) 274 + { 275 + void __iomem *reg; 276 + 277 + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : 278 + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; 279 + iowrite32(TXRX_STATUS_BITMASK, reg); 280 + iowrite32(EMPTY_STATUS_BITMASK, reg); 281 + }
+180 drivers/net/wwan/t7xx/t7xx_cldma.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only 2 + * 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Haijun Liu <haijun.liu@mediatek.com> 8 + * Moises Veleta <moises.veleta@intel.com> 9 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 10 + * 11 + * Contributors: 12 + * Amir Hanania <amir.hanania@intel.com> 13 + * Andy Shevchenko <andriy.shevchenko@linux.intel.com> 14 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 15 + */ 16 + 17 + #ifndef __T7XX_CLDMA_H__ 18 + #define __T7XX_CLDMA_H__ 19 + 20 + #include <linux/bits.h> 21 + #include <linux/types.h> 22 + 23 + #define CLDMA_TXQ_NUM 8 24 + #define CLDMA_RXQ_NUM 8 25 + #define CLDMA_ALL_Q GENMASK(7, 0) 26 + 27 + /* Interrupt status bits */ 28 + #define EMPTY_STATUS_BITMASK GENMASK(15, 8) 29 + #define TXRX_STATUS_BITMASK GENMASK(7, 0) 30 + #define EQ_STA_BIT_OFFSET 8 31 + #define L2_INT_BIT_COUNT 16 32 + #define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK) 33 + 34 + #define TQ_ERR_INT_BITMASK GENMASK(23, 16) 35 + #define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) 36 + 37 + #define RQ_ERR_INT_BITMASK GENMASK(23, 16) 38 + #define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) 39 + 40 + #define CLDMA0_AO_BASE 0x10049000 41 + #define CLDMA0_PD_BASE 0x1021d000 42 + #define CLDMA1_AO_BASE 0x1004b000 43 + #define CLDMA1_PD_BASE 0x1021f000 44 + 45 + #define CLDMA_R_AO_BASE 0x10023000 46 + #define CLDMA_R_PD_BASE 0x1023d000 47 + 48 + /* CLDMA TX */ 49 + #define REG_CLDMA_UL_START_ADDRL_0 0x0004 50 + #define REG_CLDMA_UL_START_ADDRH_0 0x0008 51 + #define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044 52 + #define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048 53 + #define REG_CLDMA_UL_STATUS 0x0084 54 + #define REG_CLDMA_UL_START_CMD 0x0088 55 + #define REG_CLDMA_UL_RESUME_CMD 0x008c 56 + #define REG_CLDMA_UL_STOP_CMD 0x0090 57 + #define REG_CLDMA_UL_ERROR 0x0094 58 + #define REG_CLDMA_UL_CFG 0x0098 59 + #define UL_CFG_BIT_MODE_36 BIT(5) 60 + #define UL_CFG_BIT_MODE_40 BIT(6) 61 + #define UL_CFG_BIT_MODE_64 BIT(7) 62 + #define UL_CFG_BIT_MODE_MASK GENMASK(7, 5) 63 + 64 + #define REG_CLDMA_UL_MEM 0x009c 65 + #define UL_MEM_CHECK_DIS BIT(0) 66 + 67 + /* CLDMA RX */ 68 + #define REG_CLDMA_DL_START_CMD 0x05bc 69 + #define REG_CLDMA_DL_RESUME_CMD 0x05c0 70 + #define REG_CLDMA_DL_STOP_CMD 0x05c4 71 + #define REG_CLDMA_DL_MEM 0x0508 72 + #define DL_MEM_CHECK_DIS BIT(0) 73 + 74 + #define REG_CLDMA_DL_CFG 0x0404 75 + #define DL_CFG_UP_HW_LAST BIT(2) 76 + #define DL_CFG_BIT_MODE_36 BIT(10) 77 + #define DL_CFG_BIT_MODE_40 BIT(11) 78 + #define DL_CFG_BIT_MODE_64 BIT(12) 79 + #define DL_CFG_BIT_MODE_MASK GENMASK(12, 10) 80 + 81 + #define REG_CLDMA_DL_START_ADDRL_0 0x0478 82 + #define REG_CLDMA_DL_START_ADDRH_0 0x047c 83 + #define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8 84 + #define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc 85 + #define REG_CLDMA_DL_STATUS 0x04f8 86 + 87 + /* CLDMA MISC */ 88 + #define REG_CLDMA_L2TISAR0 0x0810 89 + #define REG_CLDMA_L2TISAR1 0x0814 90 + #define REG_CLDMA_L2TIMR0 0x0818 91 + #define REG_CLDMA_L2TIMR1 0x081c 92 + #define REG_CLDMA_L2TIMCR0 0x0820 93 + #define REG_CLDMA_L2TIMCR1 0x0824 94 + #define REG_CLDMA_L2TIMSR0 0x0828 95 + #define REG_CLDMA_L2TIMSR1 0x082c 96 + #define REG_CLDMA_L3TISAR0 0x0830 97 + #define REG_CLDMA_L3TISAR1 0x0834 98 + #define REG_CLDMA_L2RISAR0 0x0850 99 + #define REG_CLDMA_L2RISAR1 0x0854 100 + #define REG_CLDMA_L3RISAR0 0x0870 101 + #define REG_CLDMA_L3RISAR1 0x0874 102 + #define REG_CLDMA_IP_BUSY 0x08b4 103 + #define 
IP_BUSY_WAKEUP BIT(0) 104 + #define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0) 105 + #define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0) 106 + 107 + /* CLDMA MISC */ 108 + #define REG_CLDMA_L2RIMR0 0x0858 109 + #define REG_CLDMA_L2RIMR1 0x085c 110 + #define REG_CLDMA_L2RIMCR0 0x0860 111 + #define REG_CLDMA_L2RIMCR1 0x0864 112 + #define REG_CLDMA_L2RIMSR0 0x0868 113 + #define REG_CLDMA_L2RIMSR1 0x086c 114 + #define REG_CLDMA_BUSY_MASK 0x0954 115 + #define BUSY_MASK_PCIE BIT(0) 116 + #define BUSY_MASK_AP BIT(1) 117 + #define BUSY_MASK_MD BIT(2) 118 + 119 + #define REG_CLDMA_INT_MASK 0x0960 120 + 121 + /* CLDMA RESET */ 122 + #define REG_INFRA_RST4_SET 0x0730 123 + #define RST4_CLDMA1_SW_RST_SET BIT(20) 124 + 125 + #define REG_INFRA_RST4_CLR 0x0734 126 + #define RST4_CLDMA1_SW_RST_CLR BIT(20) 127 + 128 + #define REG_INFRA_RST2_SET 0x0140 129 + #define RST2_PMIC_SW_RST_SET BIT(18) 130 + 131 + #define REG_INFRA_RST2_CLR 0x0144 132 + #define RST2_PMIC_SW_RST_CLR BIT(18) 133 + 134 + enum mtk_txrx { 135 + MTK_TX, 136 + MTK_RX, 137 + }; 138 + 139 + enum t7xx_hw_mode { 140 + MODE_BIT_32, 141 + MODE_BIT_36, 142 + MODE_BIT_40, 143 + MODE_BIT_64, 144 + }; 145 + 146 + struct t7xx_cldma_hw { 147 + enum t7xx_hw_mode hw_mode; 148 + void __iomem *ap_ao_base; 149 + void __iomem *ap_pdn_base; 150 + u32 phy_interrupt_id; 151 + }; 152 + 153 + void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, 154 + enum mtk_txrx tx_rx); 155 + void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, 156 + enum mtk_txrx tx_rx); 157 + void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, 158 + enum mtk_txrx tx_rx); 159 + void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx); 160 + unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, 161 + enum mtk_txrx tx_rx); 162 + void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info); 163 + void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, 164 + enum mtk_txrx tx_rx); 165 + void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info); 166 + void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, 167 + enum mtk_txrx tx_rx); 168 + void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); 169 + void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); 170 + void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); 171 + void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, 172 + unsigned int qno, u64 address, enum mtk_txrx tx_rx); 173 + void t7xx_cldma_hw_reset(void __iomem *ao_base); 174 + void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); 175 + unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, 176 + enum mtk_txrx tx_rx); 177 + void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info); 178 + void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info); 179 + bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno); 180 + #endif
+1192 drivers/net/wwan/t7xx/t7xx_hif_cldma.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Amir Hanania <amir.hanania@intel.com> 8 + * Haijun Liu <haijun.liu@mediatek.com> 9 + * Moises Veleta <moises.veleta@intel.com> 10 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 11 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 12 + * 13 + * Contributors: 14 + * Andy Shevchenko <andriy.shevchenko@linux.intel.com> 15 + * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com> 16 + * Eliot Lee <eliot.lee@intel.com> 17 + */ 18 + 19 + #include <linux/bits.h> 20 + #include <linux/bitops.h> 21 + #include <linux/delay.h> 22 + #include <linux/device.h> 23 + #include <linux/dmapool.h> 24 + #include <linux/dma-mapping.h> 25 + #include <linux/dma-direction.h> 26 + #include <linux/gfp.h> 27 + #include <linux/io.h> 28 + #include <linux/io-64-nonatomic-lo-hi.h> 29 + #include <linux/iopoll.h> 30 + #include <linux/irqreturn.h> 31 + #include <linux/kernel.h> 32 + #include <linux/kthread.h> 33 + #include <linux/list.h> 34 + #include <linux/netdevice.h> 35 + #include <linux/pci.h> 36 + #include <linux/sched.h> 37 + #include <linux/skbuff.h> 38 + #include <linux/slab.h> 39 + #include <linux/spinlock.h> 40 + #include <linux/types.h> 41 + #include <linux/wait.h> 42 + #include <linux/workqueue.h> 43 + 44 + #include "t7xx_cldma.h" 45 + #include "t7xx_hif_cldma.h" 46 + #include "t7xx_mhccif.h" 47 + #include "t7xx_pci.h" 48 + #include "t7xx_pcie_mac.h" 49 + #include "t7xx_reg.h" 50 + #include "t7xx_state_monitor.h" 51 + 52 + #define MAX_TX_BUDGET 16 53 + #define MAX_RX_BUDGET 16 54 + 55 + #define CHECK_Q_STOP_TIMEOUT_US 1000000 56 + #define CHECK_Q_STOP_STEP_US 10000 57 + 58 + #define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */ 59 + 60 + static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, 61 + enum mtk_txrx tx_rx, unsigned int index) 62 + { 63 + queue->dir = tx_rx; 64 + queue->index = index; 65 + queue->md_ctrl = md_ctrl; 66 + queue->tr_ring = NULL; 67 + queue->tr_done = NULL; 68 + queue->tx_next = NULL; 69 + } 70 + 71 + static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, 72 + enum mtk_txrx tx_rx, unsigned int index) 73 + { 74 + md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index); 75 + init_waitqueue_head(&queue->req_wq); 76 + spin_lock_init(&queue->ring_lock); 77 + } 78 + 79 + static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr) 80 + { 81 + gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr)); 82 + gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr)); 83 + } 84 + 85 + static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr) 86 + { 87 + gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr)); 88 + gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr)); 89 + } 90 + 91 + static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, 92 + size_t size) 93 + { 94 + req->skb = __dev_alloc_skb(size, GFP_KERNEL); 95 + if (!req->skb) 96 + return -ENOMEM; 97 + 98 + req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, 99 + skb_data_area_size(req->skb), DMA_FROM_DEVICE); 100 + if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { 101 + dev_kfree_skb_any(req->skb); 102 + req->skb = NULL; 103 + req->mapped_buff = 0; 104 + dev_err(md_ctrl->dev, "DMA mapping failed\n"); 105 + return -ENOMEM; 106 + } 107 + 108 + return 0; 109 + } 110 + 
111 + static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget) 112 + { 113 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 114 + unsigned int hwo_polling_count = 0; 115 + struct t7xx_cldma_hw *hw_info; 116 + bool rx_not_done = true; 117 + unsigned long flags; 118 + int count = 0; 119 + 120 + hw_info = &md_ctrl->hw_info; 121 + 122 + do { 123 + struct cldma_request *req; 124 + struct cldma_gpd *gpd; 125 + struct sk_buff *skb; 126 + int ret; 127 + 128 + req = queue->tr_done; 129 + if (!req) 130 + return -ENODATA; 131 + 132 + gpd = req->gpd; 133 + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { 134 + dma_addr_t gpd_addr; 135 + 136 + if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) { 137 + dev_err(md_ctrl->dev, "PCIe Link disconnected\n"); 138 + return -ENODEV; 139 + } 140 + 141 + gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 + 142 + queue->index * sizeof(u64)); 143 + if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100) 144 + return 0; 145 + 146 + udelay(1); 147 + continue; 148 + } 149 + 150 + hwo_polling_count = 0; 151 + skb = req->skb; 152 + 153 + if (req->mapped_buff) { 154 + dma_unmap_single(md_ctrl->dev, req->mapped_buff, 155 + skb_data_area_size(skb), DMA_FROM_DEVICE); 156 + req->mapped_buff = 0; 157 + } 158 + 159 + skb->len = 0; 160 + skb_reset_tail_pointer(skb); 161 + skb_put(skb, le16_to_cpu(gpd->data_buff_len)); 162 + 163 + ret = md_ctrl->recv_skb(queue, skb); 164 + /* Break processing, will try again later */ 165 + if (ret < 0) 166 + return ret; 167 + 168 + req->skb = NULL; 169 + t7xx_cldma_gpd_set_data_ptr(gpd, 0); 170 + 171 + spin_lock_irqsave(&queue->ring_lock, flags); 172 + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); 173 + spin_unlock_irqrestore(&queue->ring_lock, flags); 174 + req = queue->rx_refill; 175 + 176 + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size); 177 + if (ret) 178 + return ret; 179 + 180 + gpd = req->gpd; 181 + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); 182 + gpd->data_buff_len = 0; 183 + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; 184 + 185 + spin_lock_irqsave(&queue->ring_lock, flags); 186 + queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); 187 + spin_unlock_irqrestore(&queue->ring_lock, flags); 188 + 189 + rx_not_done = ++count < budget || !need_resched(); 190 + } while (rx_not_done); 191 + 192 + *over_budget = true; 193 + return 0; 194 + } 195 + 196 + static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget) 197 + { 198 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 199 + struct t7xx_cldma_hw *hw_info; 200 + unsigned int pending_rx_int; 201 + bool over_budget = false; 202 + unsigned long flags; 203 + int ret; 204 + 205 + hw_info = &md_ctrl->hw_info; 206 + 207 + do { 208 + ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget); 209 + if (ret == -ENODATA) 210 + return 0; 211 + else if (ret) 212 + return ret; 213 + 214 + pending_rx_int = 0; 215 + 216 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 217 + if (md_ctrl->rxq_active & BIT(queue->index)) { 218 + if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX)) 219 + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX); 220 + 221 + pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index), 222 + MTK_RX); 223 + if (pending_rx_int) { 224 + t7xx_cldma_hw_rx_done(hw_info, pending_rx_int); 225 + 226 + if (over_budget) { 227 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 228 + return 
-EAGAIN; 229 + } 230 + } 231 + } 232 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 233 + } while (pending_rx_int); 234 + 235 + return 0; 236 + } 237 + 238 + static void t7xx_cldma_rx_done(struct work_struct *work) 239 + { 240 + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); 241 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 242 + int value; 243 + 244 + value = t7xx_cldma_gpd_rx_collect(queue, queue->budget); 245 + if (value && md_ctrl->rxq_active & BIT(queue->index)) { 246 + queue_work(queue->worker, &queue->cldma_work); 247 + return; 248 + } 249 + 250 + t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); 251 + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); 252 + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); 253 + } 254 + 255 + static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) 256 + { 257 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 258 + unsigned int dma_len, count = 0; 259 + struct cldma_request *req; 260 + struct cldma_gpd *gpd; 261 + unsigned long flags; 262 + dma_addr_t dma_free; 263 + struct sk_buff *skb; 264 + 265 + while (!kthread_should_stop()) { 266 + spin_lock_irqsave(&queue->ring_lock, flags); 267 + req = queue->tr_done; 268 + if (!req) { 269 + spin_unlock_irqrestore(&queue->ring_lock, flags); 270 + break; 271 + } 272 + gpd = req->gpd; 273 + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { 274 + spin_unlock_irqrestore(&queue->ring_lock, flags); 275 + break; 276 + } 277 + queue->budget++; 278 + dma_free = req->mapped_buff; 279 + dma_len = le16_to_cpu(gpd->data_buff_len); 280 + skb = req->skb; 281 + req->skb = NULL; 282 + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); 283 + spin_unlock_irqrestore(&queue->ring_lock, flags); 284 + 285 + count++; 286 + dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE); 287 + dev_kfree_skb_any(skb); 288 + } 289 + 290 + if (count) 291 + wake_up_nr(&queue->req_wq, count); 292 + 293 + return count; 294 + } 295 + 296 + static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue) 297 + { 298 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 299 + struct cldma_request *req; 300 + dma_addr_t ul_curr_addr; 301 + unsigned long flags; 302 + bool pending_gpd; 303 + 304 + if (!(md_ctrl->txq_active & BIT(queue->index))) 305 + return; 306 + 307 + spin_lock_irqsave(&queue->ring_lock, flags); 308 + req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry); 309 + spin_unlock_irqrestore(&queue->ring_lock, flags); 310 + 311 + pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb; 312 + 313 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 314 + if (pending_gpd) { 315 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 316 + 317 + /* Check current processing TGPD, 64-bit address is in a table by Q index */ 318 + ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 + 319 + queue->index * sizeof(u64)); 320 + if (req->gpd_addr != ul_curr_addr) { 321 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 322 + dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n", 323 + md_ctrl->hif_id, queue->index); 324 + return; 325 + } 326 + 327 + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX); 328 + } 329 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 330 + } 331 + 332 + static void t7xx_cldma_tx_done(struct work_struct *work) 333 + { 334 + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); 335 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 336 
+ struct t7xx_cldma_hw *hw_info; 337 + unsigned int l2_tx_int; 338 + unsigned long flags; 339 + 340 + hw_info = &md_ctrl->hw_info; 341 + t7xx_cldma_gpd_tx_collect(queue); 342 + l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index), 343 + MTK_TX); 344 + if (l2_tx_int & EQ_STA_BIT(queue->index)) { 345 + t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index)); 346 + t7xx_cldma_txq_empty_hndl(queue); 347 + } 348 + 349 + if (l2_tx_int & BIT(queue->index)) { 350 + t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index)); 351 + queue_work(queue->worker, &queue->cldma_work); 352 + return; 353 + } 354 + 355 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 356 + if (md_ctrl->txq_active & BIT(queue->index)) { 357 + t7xx_cldma_clear_ip_busy(hw_info); 358 + t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX); 359 + t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); 360 + } 361 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 362 + } 363 + 364 + static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, 365 + struct cldma_ring *ring, enum dma_data_direction tx_rx) 366 + { 367 + struct cldma_request *req_cur, *req_next; 368 + 369 + list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) { 370 + if (req_cur->mapped_buff && req_cur->skb) { 371 + dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, 372 + skb_data_area_size(req_cur->skb), tx_rx); 373 + req_cur->mapped_buff = 0; 374 + } 375 + 376 + dev_kfree_skb_any(req_cur->skb); 377 + 378 + if (req_cur->gpd) 379 + dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr); 380 + 381 + list_del(&req_cur->entry); 382 + kfree(req_cur); 383 + } 384 + } 385 + 386 + static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size) 387 + { 388 + struct cldma_request *req; 389 + int val; 390 + 391 + req = kzalloc(sizeof(*req), GFP_KERNEL); 392 + if (!req) 393 + return NULL; 394 + 395 + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); 396 + if (!req->gpd) 397 + goto err_free_req; 398 + 399 + val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size); 400 + if (val) 401 + goto err_free_pool; 402 + 403 + return req; 404 + 405 + err_free_pool: 406 + dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr); 407 + 408 + err_free_req: 409 + kfree(req); 410 + 411 + return NULL; 412 + } 413 + 414 + static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) 415 + { 416 + struct cldma_request *req; 417 + struct cldma_gpd *gpd; 418 + int i; 419 + 420 + INIT_LIST_HEAD(&ring->gpd_ring); 421 + ring->length = MAX_RX_BUDGET; 422 + 423 + for (i = 0; i < ring->length; i++) { 424 + req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size); 425 + if (!req) { 426 + t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE); 427 + return -ENOMEM; 428 + } 429 + 430 + gpd = req->gpd; 431 + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); 432 + gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size); 433 + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; 434 + INIT_LIST_HEAD(&req->entry); 435 + list_add_tail(&req->entry, &ring->gpd_ring); 436 + } 437 + 438 + /* Link previous GPD to next GPD, circular */ 439 + list_for_each_entry(req, &ring->gpd_ring, entry) { 440 + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); 441 + gpd = req->gpd; 442 + } 443 + 444 + return 0; 445 + } 446 + 447 + static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl) 448 + { 449 + struct cldma_request *req; 450 + 451 + req = kzalloc(sizeof(*req), 
GFP_KERNEL); 452 + if (!req) 453 + return NULL; 454 + 455 + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); 456 + if (!req->gpd) { 457 + kfree(req); 458 + return NULL; 459 + } 460 + 461 + return req; 462 + } 463 + 464 + static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) 465 + { 466 + struct cldma_request *req; 467 + struct cldma_gpd *gpd; 468 + int i; 469 + 470 + INIT_LIST_HEAD(&ring->gpd_ring); 471 + ring->length = MAX_TX_BUDGET; 472 + 473 + for (i = 0; i < ring->length; i++) { 474 + req = t7xx_alloc_tx_request(md_ctrl); 475 + if (!req) { 476 + t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE); 477 + return -ENOMEM; 478 + } 479 + 480 + gpd = req->gpd; 481 + gpd->flags = GPD_FLAGS_IOC; 482 + INIT_LIST_HEAD(&req->entry); 483 + list_add_tail(&req->entry, &ring->gpd_ring); 484 + } 485 + 486 + /* Link previous GPD to next GPD, circular */ 487 + list_for_each_entry(req, &ring->gpd_ring, entry) { 488 + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); 489 + gpd = req->gpd; 490 + } 491 + 492 + return 0; 493 + } 494 + 495 + /** 496 + * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values. 497 + * @queue: Pointer to the queue structure. 498 + * 499 + * Called with ring_lock (unless called during initialization phase) 500 + */ 501 + static void t7xx_cldma_q_reset(struct cldma_queue *queue) 502 + { 503 + struct cldma_request *req; 504 + 505 + req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry); 506 + queue->tr_done = req; 507 + queue->budget = queue->tr_ring->length; 508 + 509 + if (queue->dir == MTK_TX) 510 + queue->tx_next = req; 511 + else 512 + queue->rx_refill = req; 513 + } 514 + 515 + static void t7xx_cldma_rxq_init(struct cldma_queue *queue) 516 + { 517 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 518 + 519 + queue->dir = MTK_RX; 520 + queue->tr_ring = &md_ctrl->rx_ring[queue->index]; 521 + t7xx_cldma_q_reset(queue); 522 + } 523 + 524 + static void t7xx_cldma_txq_init(struct cldma_queue *queue) 525 + { 526 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 527 + 528 + queue->dir = MTK_TX; 529 + queue->tr_ring = &md_ctrl->tx_ring[queue->index]; 530 + t7xx_cldma_q_reset(queue); 531 + } 532 + 533 + static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl) 534 + { 535 + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); 536 + } 537 + 538 + static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl) 539 + { 540 + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); 541 + } 542 + 543 + static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) 544 + { 545 + unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val; 546 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 547 + int i; 548 + 549 + /* L2 raw interrupt status */ 550 + l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); 551 + l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); 552 + l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0); 553 + l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0); 554 + l2_tx_int &= ~l2_tx_int_msk; 555 + l2_rx_int &= ~l2_rx_int_msk; 556 + 557 + if (l2_tx_int) { 558 + if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) { 559 + /* Read and clear L3 TX interrupt status */ 560 + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); 561 + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); 562 + val = ioread32(hw_info->ap_pdn_base + 
REG_CLDMA_L3TISAR1); 563 + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); 564 + } 565 + 566 + t7xx_cldma_hw_tx_done(hw_info, l2_tx_int); 567 + if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { 568 + for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { 569 + if (i < CLDMA_TXQ_NUM) { 570 + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); 571 + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); 572 + queue_work(md_ctrl->txq[i].worker, 573 + &md_ctrl->txq[i].cldma_work); 574 + } else { 575 + t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]); 576 + } 577 + } 578 + } 579 + } 580 + 581 + if (l2_rx_int) { 582 + if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) { 583 + /* Read and clear L3 RX interrupt status */ 584 + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); 585 + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); 586 + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); 587 + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); 588 + } 589 + 590 + t7xx_cldma_hw_rx_done(hw_info, l2_rx_int); 591 + if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { 592 + l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; 593 + for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { 594 + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); 595 + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); 596 + queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); 597 + } 598 + } 599 + } 600 + } 601 + 602 + static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl) 603 + { 604 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 605 + unsigned int tx_active; 606 + unsigned int rx_active; 607 + 608 + if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) 609 + return false; 610 + 611 + tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX); 612 + rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX); 613 + 614 + return tx_active || rx_active; 615 + } 616 + 617 + /** 618 + * t7xx_cldma_stop() - Stop CLDMA. 619 + * @md_ctrl: CLDMA context structure. 620 + * 621 + * Stop TX and RX queues. Disable L1 and L2 interrupts. 622 + * Clear status registers. 623 + * 624 + * Return: 625 + * * 0 - Success. 626 + * * -ERROR - Error code from polling cldma_queues_active. 
627 + */ 628 + int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl) 629 + { 630 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 631 + bool active; 632 + int i, ret; 633 + 634 + md_ctrl->rxq_active = 0; 635 + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); 636 + md_ctrl->txq_active = 0; 637 + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); 638 + md_ctrl->txq_started = 0; 639 + t7xx_cldma_disable_irq(md_ctrl); 640 + t7xx_cldma_hw_stop(hw_info, MTK_RX); 641 + t7xx_cldma_hw_stop(hw_info, MTK_TX); 642 + t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK); 643 + t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK); 644 + 645 + if (md_ctrl->is_late_init) { 646 + for (i = 0; i < CLDMA_TXQ_NUM; i++) 647 + flush_work(&md_ctrl->txq[i].cldma_work); 648 + 649 + for (i = 0; i < CLDMA_RXQ_NUM; i++) 650 + flush_work(&md_ctrl->rxq[i].cldma_work); 651 + } 652 + 653 + ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US, 654 + CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl); 655 + if (ret) 656 + dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id); 657 + 658 + return ret; 659 + } 660 + 661 + static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl) 662 + { 663 + int i; 664 + 665 + if (!md_ctrl->is_late_init) 666 + return; 667 + 668 + for (i = 0; i < CLDMA_TXQ_NUM; i++) 669 + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); 670 + 671 + for (i = 0; i < CLDMA_RXQ_NUM; i++) 672 + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE); 673 + 674 + dma_pool_destroy(md_ctrl->gpd_dmapool); 675 + md_ctrl->gpd_dmapool = NULL; 676 + md_ctrl->is_late_init = false; 677 + } 678 + 679 + void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl) 680 + { 681 + unsigned long flags; 682 + int i; 683 + 684 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 685 + md_ctrl->txq_active = 0; 686 + md_ctrl->rxq_active = 0; 687 + t7xx_cldma_disable_irq(md_ctrl); 688 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 689 + 690 + for (i = 0; i < CLDMA_TXQ_NUM; i++) { 691 + cancel_work_sync(&md_ctrl->txq[i].cldma_work); 692 + 693 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 694 + md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); 695 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 696 + } 697 + 698 + for (i = 0; i < CLDMA_RXQ_NUM; i++) { 699 + cancel_work_sync(&md_ctrl->rxq[i].cldma_work); 700 + 701 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 702 + md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); 703 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 704 + } 705 + 706 + t7xx_cldma_late_release(md_ctrl); 707 + } 708 + 709 + /** 710 + * t7xx_cldma_start() - Start CLDMA. 711 + * @md_ctrl: CLDMA context structure. 712 + * 713 + * Set TX/RX start address. 714 + * Start all RX queues and enable L2 interrupt. 
715 + */ 716 + void t7xx_cldma_start(struct cldma_ctrl *md_ctrl) 717 + { 718 + unsigned long flags; 719 + 720 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 721 + if (md_ctrl->is_late_init) { 722 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 723 + int i; 724 + 725 + t7xx_cldma_enable_irq(md_ctrl); 726 + 727 + for (i = 0; i < CLDMA_TXQ_NUM; i++) { 728 + if (md_ctrl->txq[i].tr_done) 729 + t7xx_cldma_hw_set_start_addr(hw_info, i, 730 + md_ctrl->txq[i].tr_done->gpd_addr, 731 + MTK_TX); 732 + } 733 + 734 + for (i = 0; i < CLDMA_RXQ_NUM; i++) { 735 + if (md_ctrl->rxq[i].tr_done) 736 + t7xx_cldma_hw_set_start_addr(hw_info, i, 737 + md_ctrl->rxq[i].tr_done->gpd_addr, 738 + MTK_RX); 739 + } 740 + 741 + /* Enable L2 interrupt */ 742 + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); 743 + t7xx_cldma_hw_start(hw_info); 744 + md_ctrl->txq_started = 0; 745 + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; 746 + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; 747 + } 748 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 749 + } 750 + 751 + static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum) 752 + { 753 + struct cldma_queue *txq = &md_ctrl->txq[qnum]; 754 + struct cldma_request *req; 755 + struct cldma_gpd *gpd; 756 + unsigned long flags; 757 + 758 + spin_lock_irqsave(&txq->ring_lock, flags); 759 + t7xx_cldma_q_reset(txq); 760 + list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) { 761 + gpd = req->gpd; 762 + gpd->flags &= ~GPD_FLAGS_HWO; 763 + t7xx_cldma_gpd_set_data_ptr(gpd, 0); 764 + gpd->data_buff_len = 0; 765 + dev_kfree_skb_any(req->skb); 766 + req->skb = NULL; 767 + } 768 + spin_unlock_irqrestore(&txq->ring_lock, flags); 769 + } 770 + 771 + static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum) 772 + { 773 + struct cldma_queue *rxq = &md_ctrl->rxq[qnum]; 774 + struct cldma_request *req; 775 + struct cldma_gpd *gpd; 776 + unsigned long flags; 777 + int ret = 0; 778 + 779 + spin_lock_irqsave(&rxq->ring_lock, flags); 780 + t7xx_cldma_q_reset(rxq); 781 + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { 782 + gpd = req->gpd; 783 + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; 784 + gpd->data_buff_len = 0; 785 + 786 + if (req->skb) { 787 + req->skb->len = 0; 788 + skb_reset_tail_pointer(req->skb); 789 + } 790 + } 791 + 792 + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { 793 + if (req->skb) 794 + continue; 795 + 796 + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size); 797 + if (ret) 798 + break; 799 + 800 + t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff); 801 + } 802 + spin_unlock_irqrestore(&rxq->ring_lock, flags); 803 + 804 + return ret; 805 + } 806 + 807 + void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) 808 + { 809 + int i; 810 + 811 + if (tx_rx == MTK_TX) { 812 + for (i = 0; i < CLDMA_TXQ_NUM; i++) 813 + t7xx_cldma_clear_txq(md_ctrl, i); 814 + } else { 815 + for (i = 0; i < CLDMA_RXQ_NUM; i++) 816 + t7xx_cldma_clear_rxq(md_ctrl, i); 817 + } 818 + } 819 + 820 + void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) 821 + { 822 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 823 + unsigned long flags; 824 + 825 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 826 + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx); 827 + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx); 828 + if (tx_rx == MTK_RX) 829 + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; 830 + else 831 + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; 832 + 
t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx); 833 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 834 + } 835 + 836 + static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req, 837 + struct sk_buff *skb) 838 + { 839 + struct cldma_ctrl *md_ctrl = queue->md_ctrl; 840 + struct cldma_gpd *gpd = tx_req->gpd; 841 + unsigned long flags; 842 + 843 + /* Update GPD */ 844 + tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE); 845 + 846 + if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) { 847 + dev_err(md_ctrl->dev, "DMA mapping failed\n"); 848 + return -ENOMEM; 849 + } 850 + 851 + t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff); 852 + gpd->data_buff_len = cpu_to_le16(skb->len); 853 + 854 + /* This lock must cover TGPD setting, as even without a resume operation, 855 + * CLDMA can send next HWO=1 if last TGPD just finished. 856 + */ 857 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 858 + if (md_ctrl->txq_active & BIT(queue->index)) 859 + gpd->flags |= GPD_FLAGS_HWO; 860 + 861 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 862 + 863 + tx_req->skb = skb; 864 + return 0; 865 + } 866 + 867 + /* Called with cldma_lock */ 868 + static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, 869 + struct cldma_request *prev_req) 870 + { 871 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 872 + 873 + /* Check whether the device was powered off (CLDMA start address is not set) */ 874 + if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) { 875 + t7xx_cldma_hw_init(hw_info); 876 + t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX); 877 + md_ctrl->txq_started &= ~BIT(qno); 878 + } 879 + 880 + if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) { 881 + if (md_ctrl->txq_started & BIT(qno)) 882 + t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX); 883 + else 884 + t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX); 885 + 886 + md_ctrl->txq_started |= BIT(qno); 887 + } 888 + } 889 + 890 + /** 891 + * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets. 892 + * @md_ctrl: CLDMA context structure. 893 + * @recv_skb: Receiving skb callback. 894 + */ 895 + void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, 896 + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)) 897 + { 898 + md_ctrl->recv_skb = recv_skb; 899 + } 900 + 901 + /** 902 + * t7xx_cldma_send_skb() - Send control data to modem. 903 + * @md_ctrl: CLDMA context structure. 904 + * @qno: Queue number. 905 + * @skb: Socket buffer. 906 + * 907 + * Return: 908 + * * 0 - Success. 909 + * * -ENOMEM - Allocation failure. 910 + * * -EINVAL - Invalid queue request. 911 + * * -EIO - Queue is not active. 912 + * * -ETIMEDOUT - Timeout waiting for the device to wake up. 
913 + */ 914 + int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb) 915 + { 916 + struct cldma_request *tx_req; 917 + struct cldma_queue *queue; 918 + unsigned long flags; 919 + int ret; 920 + 921 + if (qno >= CLDMA_TXQ_NUM) 922 + return -EINVAL; 923 + 924 + queue = &md_ctrl->txq[qno]; 925 + 926 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 927 + if (!(md_ctrl->txq_active & BIT(qno))) { 928 + ret = -EIO; 929 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 930 + goto allow_sleep; 931 + } 932 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 933 + 934 + do { 935 + spin_lock_irqsave(&queue->ring_lock, flags); 936 + tx_req = queue->tx_next; 937 + if (queue->budget > 0 && !tx_req->skb) { 938 + struct list_head *gpd_ring = &queue->tr_ring->gpd_ring; 939 + 940 + queue->budget--; 941 + t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb); 942 + queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); 943 + spin_unlock_irqrestore(&queue->ring_lock, flags); 944 + 945 + /* Protect the access to the modem for queues operations (resume/start) 946 + * which access shared locations by all the queues. 947 + * cldma_lock is independent of ring_lock which is per queue. 948 + */ 949 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 950 + t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req); 951 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 952 + 953 + break; 954 + } 955 + spin_unlock_irqrestore(&queue->ring_lock, flags); 956 + 957 + if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { 958 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 959 + t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); 960 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 961 + } 962 + 963 + ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0); 964 + } while (!ret); 965 + 966 + allow_sleep: 967 + return ret; 968 + } 969 + 970 + static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) 971 + { 972 + char dma_pool_name[32]; 973 + int i, j, ret; 974 + 975 + if (md_ctrl->is_late_init) { 976 + dev_err(md_ctrl->dev, "CLDMA late init was already done\n"); 977 + return -EALREADY; 978 + } 979 + 980 + snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id); 981 + 982 + md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev, 983 + sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0); 984 + if (!md_ctrl->gpd_dmapool) { 985 + dev_err(md_ctrl->dev, "DMA pool alloc fail\n"); 986 + return -ENOMEM; 987 + } 988 + 989 + for (i = 0; i < CLDMA_TXQ_NUM; i++) { 990 + ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]); 991 + if (ret) { 992 + dev_err(md_ctrl->dev, "control TX ring init fail\n"); 993 + goto err_free_tx_ring; 994 + } 995 + } 996 + 997 + for (j = 0; j < CLDMA_RXQ_NUM; j++) { 998 + md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU; 999 + 1000 + if (j == CLDMA_RXQ_NUM - 1) 1001 + md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ; 1002 + 1003 + ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]); 1004 + if (ret) { 1005 + dev_err(md_ctrl->dev, "Control RX ring init fail\n"); 1006 + goto err_free_rx_ring; 1007 + } 1008 + } 1009 + 1010 + for (i = 0; i < CLDMA_TXQ_NUM; i++) 1011 + t7xx_cldma_txq_init(&md_ctrl->txq[i]); 1012 + 1013 + for (j = 0; j < CLDMA_RXQ_NUM; j++) 1014 + t7xx_cldma_rxq_init(&md_ctrl->rxq[j]); 1015 + 1016 + md_ctrl->is_late_init = true; 1017 + return 0; 1018 + 1019 + err_free_rx_ring: 1020 + while (j--) 1021 + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE); 
1022 + 1023 + err_free_tx_ring: 1024 + while (i--) 1025 + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); 1026 + 1027 + return ret; 1028 + } 1029 + 1030 + static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr) 1031 + { 1032 + return addr + phy_addr - addr_trs1; 1033 + } 1034 + 1035 + static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl) 1036 + { 1037 + struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr; 1038 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 1039 + u32 phy_ao_base, phy_pd_base; 1040 + 1041 + if (md_ctrl->hif_id != CLDMA_ID_MD) 1042 + return; 1043 + 1044 + phy_ao_base = CLDMA1_AO_BASE; 1045 + phy_pd_base = CLDMA1_PD_BASE; 1046 + hw_info->phy_interrupt_id = CLDMA1_INT; 1047 + hw_info->hw_mode = MODE_BIT_64; 1048 + hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, 1049 + pbase->pcie_dev_reg_trsl_addr, phy_ao_base); 1050 + hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, 1051 + pbase->pcie_dev_reg_trsl_addr, phy_pd_base); 1052 + } 1053 + 1054 + static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) 1055 + { 1056 + dev_kfree_skb_any(skb); 1057 + return 0; 1058 + } 1059 + 1060 + int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) 1061 + { 1062 + struct device *dev = &t7xx_dev->pdev->dev; 1063 + struct cldma_ctrl *md_ctrl; 1064 + 1065 + md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL); 1066 + if (!md_ctrl) 1067 + return -ENOMEM; 1068 + 1069 + md_ctrl->t7xx_dev = t7xx_dev; 1070 + md_ctrl->dev = dev; 1071 + md_ctrl->hif_id = hif_id; 1072 + md_ctrl->recv_skb = t7xx_cldma_default_recv_skb; 1073 + t7xx_hw_info_init(md_ctrl); 1074 + t7xx_dev->md->md_ctrl[hif_id] = md_ctrl; 1075 + return 0; 1076 + } 1077 + 1078 + void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) 1079 + { 1080 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 1081 + unsigned long flags; 1082 + 1083 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 1084 + t7xx_cldma_hw_stop(hw_info, MTK_TX); 1085 + t7xx_cldma_hw_stop(hw_info, MTK_RX); 1086 + t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); 1087 + t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); 1088 + t7xx_cldma_hw_init(hw_info); 1089 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 1090 + } 1091 + 1092 + static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data) 1093 + { 1094 + struct cldma_ctrl *md_ctrl = data; 1095 + u32 interrupt; 1096 + 1097 + interrupt = md_ctrl->hw_info.phy_interrupt_id; 1098 + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt); 1099 + t7xx_cldma_irq_work_cb(md_ctrl); 1100 + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt); 1101 + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt); 1102 + return IRQ_HANDLED; 1103 + } 1104 + 1105 + static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) 1106 + { 1107 + int i; 1108 + 1109 + for (i = 0; i < CLDMA_TXQ_NUM; i++) { 1110 + if (md_ctrl->txq[i].worker) { 1111 + destroy_workqueue(md_ctrl->txq[i].worker); 1112 + md_ctrl->txq[i].worker = NULL; 1113 + } 1114 + } 1115 + 1116 + for (i = 0; i < CLDMA_RXQ_NUM; i++) { 1117 + if (md_ctrl->rxq[i].worker) { 1118 + destroy_workqueue(md_ctrl->rxq[i].worker); 1119 + md_ctrl->rxq[i].worker = NULL; 1120 + } 1121 + } 1122 + } 1123 + 1124 + /** 1125 + * t7xx_cldma_init() - Initialize CLDMA. 1126 + * @md_ctrl: CLDMA context structure. 1127 + * 1128 + * Initialize HIF TX/RX queue structure. 
1129 + * Register CLDMA callback ISR with PCIe driver. 1130 + * 1131 + * Return: 1132 + * * 0 - Success. 1133 + * * -ERROR - Error code from failure sub-initializations. 1134 + */ 1135 + int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) 1136 + { 1137 + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 1138 + int i; 1139 + 1140 + md_ctrl->txq_active = 0; 1141 + md_ctrl->rxq_active = 0; 1142 + md_ctrl->is_late_init = false; 1143 + 1144 + spin_lock_init(&md_ctrl->cldma_lock); 1145 + 1146 + for (i = 0; i < CLDMA_TXQ_NUM; i++) { 1147 + md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); 1148 + md_ctrl->txq[i].worker = 1149 + alloc_workqueue("md_hif%d_tx%d_worker", 1150 + WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI), 1151 + 1, md_ctrl->hif_id, i); 1152 + if (!md_ctrl->txq[i].worker) 1153 + goto err_workqueue; 1154 + 1155 + INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done); 1156 + } 1157 + 1158 + for (i = 0; i < CLDMA_RXQ_NUM; i++) { 1159 + md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); 1160 + INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); 1161 + 1162 + md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", 1163 + WQ_UNBOUND | WQ_MEM_RECLAIM, 1164 + 1, md_ctrl->hif_id, i); 1165 + if (!md_ctrl->rxq[i].worker) 1166 + goto err_workqueue; 1167 + } 1168 + 1169 + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); 1170 + md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler; 1171 + md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL; 1172 + md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl; 1173 + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); 1174 + return 0; 1175 + 1176 + err_workqueue: 1177 + t7xx_cldma_destroy_wqs(md_ctrl); 1178 + return -ENOMEM; 1179 + } 1180 + 1181 + void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl) 1182 + { 1183 + t7xx_cldma_late_release(md_ctrl); 1184 + t7xx_cldma_late_init(md_ctrl); 1185 + } 1186 + 1187 + void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl) 1188 + { 1189 + t7xx_cldma_stop(md_ctrl); 1190 + t7xx_cldma_late_release(md_ctrl); 1191 + t7xx_cldma_destroy_wqs(md_ctrl); 1192 + }
+126 drivers/net/wwan/t7xx/t7xx_hif_cldma.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only 2 + * 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Haijun Liu <haijun.liu@mediatek.com> 8 + * Moises Veleta <moises.veleta@intel.com> 9 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 10 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 11 + * 12 + * Contributors: 13 + * Amir Hanania <amir.hanania@intel.com> 14 + * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com> 15 + * Eliot Lee <eliot.lee@intel.com> 16 + */ 17 + 18 + #ifndef __T7XX_HIF_CLDMA_H__ 19 + #define __T7XX_HIF_CLDMA_H__ 20 + 21 + #include <linux/bits.h> 22 + #include <linux/device.h> 23 + #include <linux/dmapool.h> 24 + #include <linux/pci.h> 25 + #include <linux/skbuff.h> 26 + #include <linux/spinlock.h> 27 + #include <linux/wait.h> 28 + #include <linux/workqueue.h> 29 + #include <linux/types.h> 30 + 31 + #include "t7xx_cldma.h" 32 + #include "t7xx_pci.h" 33 + 34 + /** 35 + * enum cldma_id - Identifiers for CLDMA HW units. 36 + * @CLDMA_ID_MD: Modem control channel. 37 + * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). 38 + * @CLDMA_NUM: Number of CLDMA HW units available. 39 + */ 40 + enum cldma_id { 41 + CLDMA_ID_MD, 42 + CLDMA_ID_AP, 43 + CLDMA_NUM 44 + }; 45 + 46 + struct cldma_gpd { 47 + u8 flags; 48 + u8 not_used1; 49 + __le16 rx_data_allow_len; 50 + __le32 next_gpd_ptr_h; 51 + __le32 next_gpd_ptr_l; 52 + __le32 data_buff_bd_ptr_h; 53 + __le32 data_buff_bd_ptr_l; 54 + __le16 data_buff_len; 55 + __le16 not_used2; 56 + }; 57 + 58 + struct cldma_request { 59 + struct cldma_gpd *gpd; /* Virtual address for CPU */ 60 + dma_addr_t gpd_addr; /* Physical address for DMA */ 61 + struct sk_buff *skb; 62 + dma_addr_t mapped_buff; 63 + struct list_head entry; 64 + }; 65 + 66 + struct cldma_ring { 67 + struct list_head gpd_ring; /* Ring of struct cldma_request */ 68 + unsigned int length; /* Number of struct cldma_request */ 69 + int pkt_size; 70 + }; 71 + 72 + struct cldma_queue { 73 + struct cldma_ctrl *md_ctrl; 74 + enum mtk_txrx dir; 75 + unsigned int index; 76 + struct cldma_ring *tr_ring; 77 + struct cldma_request *tr_done; 78 + struct cldma_request *rx_refill; 79 + struct cldma_request *tx_next; 80 + int budget; /* Same as ring buffer size by default */ 81 + spinlock_t ring_lock; 82 + wait_queue_head_t req_wq; /* Only for TX */ 83 + struct workqueue_struct *worker; 84 + struct work_struct cldma_work; 85 + }; 86 + 87 + struct cldma_ctrl { 88 + enum cldma_id hif_id; 89 + struct device *dev; 90 + struct t7xx_pci_dev *t7xx_dev; 91 + struct cldma_queue txq[CLDMA_TXQ_NUM]; 92 + struct cldma_queue rxq[CLDMA_RXQ_NUM]; 93 + unsigned short txq_active; 94 + unsigned short rxq_active; 95 + unsigned short txq_started; 96 + spinlock_t cldma_lock; /* Protects CLDMA structure */ 97 + /* Assumes T/R GPD/BD/SPD have the same size */ 98 + struct dma_pool *gpd_dmapool; 99 + struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; 100 + struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; 101 + struct t7xx_cldma_hw hw_info; 102 + bool is_late_init; 103 + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); 104 + }; 105 + 106 + #define GPD_FLAGS_HWO BIT(0) 107 + #define GPD_FLAGS_IOC BIT(7) 108 + #define GPD_DMAPOOL_ALIGN 16 109 + 110 + #define CLDMA_MTU 3584 /* 3.5kB */ 111 + 112 + int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev); 113 + void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl); 114 + int t7xx_cldma_init(struct cldma_ctrl *md_ctrl); 115 + void 
t7xx_cldma_exit(struct cldma_ctrl *md_ctrl); 116 + void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl); 117 + void t7xx_cldma_start(struct cldma_ctrl *md_ctrl); 118 + int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl); 119 + void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl); 120 + void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, 121 + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)); 122 + int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb); 123 + void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); 124 + void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); 125 + 126 + #endif /* __T7XX_HIF_CLDMA_H__ */
+33 drivers/net/wwan/t7xx/t7xx_reg.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only 2 + * 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Haijun Liu <haijun.liu@mediatek.com> 8 + * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com> 9 + * 10 + * Contributors: 11 + * Amir Hanania <amir.hanania@intel.com> 12 + * Andy Shevchenko <andriy.shevchenko@linux.intel.com> 13 + * Eliot Lee <eliot.lee@intel.com> 14 + * Moises Veleta <moises.veleta@intel.com> 15 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 16 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 17 + */ 18 + 19 + #ifndef __T7XX_REG_H__ 20 + #define __T7XX_REG_H__ 21 + 22 + enum t7xx_int { 23 + DPMAIF_INT, 24 + CLDMA0_INT, 25 + CLDMA1_INT, 26 + CLDMA2_INT, 27 + MHCCIF_INT, 28 + DPMAIF2_INT, 29 + SAP_RGU_INT, 30 + CLDMA3_INT, 31 + }; 32 + 33 + #endif /* __T7XX_REG_H__ */