
dmaengine: qcom: Add GPI dma driver

This controller provides DMAengine capabilities for a variety of peripheral
buses such as I2C, UART, and SPI. By using the GPI dmaengine driver, bus
drivers can use a standardized, protocol-independent interface to transfer
data between memory and peripherals.

Link: https://lore.kernel.org/r/20201109085450.24843-4-vkoul@kernel.org
Signed-off-by: Vinod Koul <vkoul@kernel.org>
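
For context, a peripheral (client) driver consumes this through the standard
dmaengine API, handing the protocol-specific setup over via the
peripheral_config member of struct dma_slave_config introduced in the same
patch series. The sketch below is minimal and illustrative only (the helper
name and values are hypothetical); it assumes struct gpi_i2c_config and
I2C_WRITE from <linux/dma/qcom-gpi-dma.h> as used by gpi_create_i2c_tre()
below, and a buffer that is already DMA-mapped:

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>
	#include <linux/dma/qcom-gpi-dma.h>

	/* hypothetical client helper, not part of this patch */
	static int example_gpi_i2c_write(struct device *dev, dma_addr_t buf,
					 size_t len, u8 addr)
	{
		struct gpi_i2c_config peripheral = { };
		struct dma_slave_config config = { };
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		struct scatterlist sg;

		chan = dma_request_chan(dev, "tx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* protocol-specific setup travels via peripheral_config */
		peripheral.set_config = 1;	/* emit a CONFIG0 TRE first */
		peripheral.op = I2C_WRITE;
		peripheral.addr = addr;
		config.peripheral_config = &peripheral;
		config.peripheral_size = sizeof(peripheral);
		dmaengine_slave_config(chan, &config);

		/* single pre-mapped segment; the driver takes one SG entry */
		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = buf;
		sg_dma_len(&sg) = len;

		desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT);
		if (!desc) {
			dma_release_channel(chan);
			return -EIO;
		}

		dmaengine_submit(desc);		/* queue the descriptor */
		dma_async_issue_pending(chan);	/* copy TREs, ring the doorbell */
		return 0;
	}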

2399 insertions(+)

drivers/dma/qcom/Kconfig (+12)
 	  Enable support for the QCOM BAM DMA controller. This controller
 	  provides DMA capabilities for a variety of on-chip devices.
 
+config QCOM_GPI_DMA
+	tristate "Qualcomm Technologies GPI DMA support"
+	depends on ARCH_QCOM
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the QCOM GPI DMA controller. This controller
+	  provides DMA capabilities for a variety of peripheral buses such
+	  as I2C, UART, and SPI. By using the GPI dmaengine driver, bus
+	  drivers can use a standardized, protocol-independent interface to
+	  transfer data between DDR and peripherals.
+
 config QCOM_HIDMA_MGMT
 	tristate "Qualcomm Technologies HIDMA Management support"
 	select DMA_ENGINE
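
The new symbol depends on ARCH_QCOM and pulls in the dmaengine core and
virt-dma helpers via select, so enabling it is a one-line change; for
example, a config fragment building the driver as a module would be:

	CONFIG_QCOM_GPI_DMA=m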
drivers/dma/qcom/Makefile (+1)
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
 obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_GPI_DMA) += gpi.o
 obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
 hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
 obj-$(CONFIG_QCOM_HIDMA) += hdma.o
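
Much of gpi.c below is register plumbing built from GENMASK()/FIELD_PREP().
As a reading aid, here is a small standalone worked example (the macros are
copied from gpi.c; the arithmetic in the comment is illustrative) of how a
channel command word is composed:

	#include <linux/bitfield.h>

	/* from gpi.c: opcode in bits 31:24, channel id in bits 7:0 */
	#define GPII_n_CH_CMD_OPCODE	GENMASK(31, 24)
	#define GPII_n_CH_CMD_CHID	GENMASK(7, 0)
	#define GPII_n_CH_CMD(opcode, chid)			\
		(FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) |	\
		 FIELD_PREP(GPII_n_CH_CMD_CHID, chid))

	/*
	 * GPII_n_CH_CMD(GPII_n_CH_CMD_START, 0) == 0x01000000: opcode 1
	 * shifted into bits 31:24, chid 0 in bits 7:0. gpi_send_cmd()
	 * writes this value to the command register at GPII_n_CH_CMD_OFFS(n)
	 * and waits up to CMD_TIMEOUT_MS for the control interrupt to
	 * confirm the resulting channel state.
	 */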
drivers/dma/qcom/gpi.c (+2303)
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 4 + * Copyright (c) 2020, Linaro Limited 5 + */ 6 + 7 + #include <dt-bindings/dma/qcom-gpi.h> 8 + #include <linux/bitfield.h> 9 + #include <linux/dma-mapping.h> 10 + #include <linux/dmaengine.h> 11 + #include <linux/module.h> 12 + #include <linux/of_dma.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/dma/qcom-gpi-dma.h> 15 + #include <linux/scatterlist.h> 16 + #include <linux/slab.h> 17 + #include "../dmaengine.h" 18 + #include "../virt-dma.h" 19 + 20 + #define TRE_TYPE_DMA 0x10 21 + #define TRE_TYPE_GO 0x20 22 + #define TRE_TYPE_CONFIG0 0x22 23 + 24 + /* TRE flags */ 25 + #define TRE_FLAGS_CHAIN BIT(0) 26 + #define TRE_FLAGS_IEOB BIT(8) 27 + #define TRE_FLAGS_IEOT BIT(9) 28 + #define TRE_FLAGS_BEI BIT(10) 29 + #define TRE_FLAGS_LINK BIT(11) 30 + #define TRE_FLAGS_TYPE GENMASK(23, 16) 31 + 32 + /* SPI CONFIG0 WD0 */ 33 + #define TRE_SPI_C0_WORD_SZ GENMASK(4, 0) 34 + #define TRE_SPI_C0_LOOPBACK BIT(8) 35 + #define TRE_SPI_C0_CS BIT(11) 36 + #define TRE_SPI_C0_CPHA BIT(12) 37 + #define TRE_SPI_C0_CPOL BIT(13) 38 + #define TRE_SPI_C0_TX_PACK BIT(24) 39 + #define TRE_SPI_C0_RX_PACK BIT(25) 40 + 41 + /* CONFIG0 WD2 */ 42 + #define TRE_C0_CLK_DIV GENMASK(11, 0) 43 + #define TRE_C0_CLK_SRC GENMASK(19, 16) 44 + 45 + /* SPI GO WD0 */ 46 + #define TRE_SPI_GO_CMD GENMASK(4, 0) 47 + #define TRE_SPI_GO_CS GENMASK(10, 8) 48 + #define TRE_SPI_GO_FRAG BIT(26) 49 + 50 + /* GO WD2 */ 51 + #define TRE_RX_LEN GENMASK(23, 0) 52 + 53 + /* I2C Config0 WD0 */ 54 + #define TRE_I2C_C0_TLOW GENMASK(7, 0) 55 + #define TRE_I2C_C0_THIGH GENMASK(15, 8) 56 + #define TRE_I2C_C0_TCYL GENMASK(23, 16) 57 + #define TRE_I2C_C0_TX_PACK BIT(24) 58 + #define TRE_I2C_C0_RX_PACK BIT(25) 59 + 60 + /* I2C GO WD0 */ 61 + #define TRE_I2C_GO_CMD GENMASK(4, 0) 62 + #define TRE_I2C_GO_ADDR GENMASK(14, 8) 63 + #define TRE_I2C_GO_STRETCH BIT(26) 64 + 65 + /* DMA TRE */ 66 + #define TRE_DMA_LEN GENMASK(23, 0) 67 + 68 + /* Register offsets from gpi-top */ 69 + #define GPII_n_CH_k_CNTXT_0_OFFS(n, k) (0x20000 + (0x4000 * (n)) + (0x80 * (k))) 70 + #define GPII_n_CH_k_CNTXT_0_EL_SIZE GENMASK(31, 24) 71 + #define GPII_n_CH_k_CNTXT_0_CHSTATE GENMASK(23, 20) 72 + #define GPII_n_CH_k_CNTXT_0_ERIDX GENMASK(18, 14) 73 + #define GPII_n_CH_k_CNTXT_0_DIR BIT(3) 74 + #define GPII_n_CH_k_CNTXT_0_PROTO GENMASK(2, 0) 75 + 76 + #define GPII_n_CH_k_CNTXT_0(el_size, erindex, dir, chtype_proto) \ 77 + (FIELD_PREP(GPII_n_CH_k_CNTXT_0_EL_SIZE, el_size) | \ 78 + FIELD_PREP(GPII_n_CH_k_CNTXT_0_ERIDX, erindex) | \ 79 + FIELD_PREP(GPII_n_CH_k_CNTXT_0_DIR, dir) | \ 80 + FIELD_PREP(GPII_n_CH_k_CNTXT_0_PROTO, chtype_proto)) 81 + 82 + #define GPI_CHTYPE_DIR_IN (0) 83 + #define GPI_CHTYPE_DIR_OUT (1) 84 + 85 + #define GPI_CHTYPE_PROTO_GPI (0x2) 86 + 87 + #define GPII_n_CH_k_DOORBELL_0_OFFS(n, k) (0x22000 + (0x4000 * (n)) + (0x8 * (k))) 88 + #define GPII_n_CH_CMD_OFFS(n) (0x23008 + (0x4000 * (n))) 89 + #define GPII_n_CH_CMD_OPCODE GENMASK(31, 24) 90 + #define GPII_n_CH_CMD_CHID GENMASK(7, 0) 91 + #define GPII_n_CH_CMD(opcode, chid) \ 92 + (FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) | \ 93 + FIELD_PREP(GPII_n_CH_CMD_CHID, chid)) 94 + 95 + #define GPII_n_CH_CMD_ALLOCATE (0) 96 + #define GPII_n_CH_CMD_START (1) 97 + #define GPII_n_CH_CMD_STOP (2) 98 + #define GPII_n_CH_CMD_RESET (9) 99 + #define GPII_n_CH_CMD_DE_ALLOC (10) 100 + #define GPII_n_CH_CMD_UART_SW_STALE (32) 101 + #define GPII_n_CH_CMD_UART_RFR_READY (33) 102 + #define 
GPII_n_CH_CMD_UART_RFR_NOT_READY (34) 103 + 104 + /* EV Context Array */ 105 + #define GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) (0x21000 + (0x4000 * (n)) + (0x80 * (k))) 106 + #define GPII_n_EV_k_CNTXT_0_EL_SIZE GENMASK(31, 24) 107 + #define GPII_n_EV_k_CNTXT_0_CHSTATE GENMASK(23, 20) 108 + #define GPII_n_EV_k_CNTXT_0_INTYPE BIT(16) 109 + #define GPII_n_EV_k_CNTXT_0_CHTYPE GENMASK(3, 0) 110 + 111 + #define GPII_n_EV_k_CNTXT_0(el_size, inttype, chtype) \ 112 + (FIELD_PREP(GPII_n_EV_k_CNTXT_0_EL_SIZE, el_size) | \ 113 + FIELD_PREP(GPII_n_EV_k_CNTXT_0_INTYPE, inttype) | \ 114 + FIELD_PREP(GPII_n_EV_k_CNTXT_0_CHTYPE, chtype)) 115 + 116 + #define GPI_INTTYPE_IRQ (1) 117 + #define GPI_CHTYPE_GPI_EV (0x2) 118 + 119 + enum CNTXT_OFFS { 120 + CNTXT_0_CONFIG = 0x0, 121 + CNTXT_1_R_LENGTH = 0x4, 122 + CNTXT_2_RING_BASE_LSB = 0x8, 123 + CNTXT_3_RING_BASE_MSB = 0xC, 124 + CNTXT_4_RING_RP_LSB = 0x10, 125 + CNTXT_5_RING_RP_MSB = 0x14, 126 + CNTXT_6_RING_WP_LSB = 0x18, 127 + CNTXT_7_RING_WP_MSB = 0x1C, 128 + CNTXT_8_RING_INT_MOD = 0x20, 129 + CNTXT_9_RING_INTVEC = 0x24, 130 + CNTXT_10_RING_MSI_LSB = 0x28, 131 + CNTXT_11_RING_MSI_MSB = 0x2C, 132 + CNTXT_12_RING_RP_UPDATE_LSB = 0x30, 133 + CNTXT_13_RING_RP_UPDATE_MSB = 0x34, 134 + }; 135 + 136 + #define GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k) (0x22100 + (0x4000 * (n)) + (0x8 * (k))) 137 + #define GPII_n_EV_CH_CMD_OFFS(n) (0x23010 + (0x4000 * (n))) 138 + #define GPII_n_EV_CMD_OPCODE GENMASK(31, 24) 139 + #define GPII_n_EV_CMD_CHID GENMASK(7, 0) 140 + #define GPII_n_EV_CMD(opcode, chid) \ 141 + (FIELD_PREP(GPII_n_EV_CMD_OPCODE, opcode) | \ 142 + FIELD_PREP(GPII_n_EV_CMD_CHID, chid)) 143 + 144 + #define GPII_n_EV_CH_CMD_ALLOCATE (0x00) 145 + #define GPII_n_EV_CH_CMD_RESET (0x09) 146 + #define GPII_n_EV_CH_CMD_DE_ALLOC (0x0A) 147 + 148 + #define GPII_n_CNTXT_TYPE_IRQ_OFFS(n) (0x23080 + (0x4000 * (n))) 149 + 150 + /* mask type register */ 151 + #define GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) (0x23088 + (0x4000 * (n))) 152 + #define GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK GENMASK(6, 0) 153 + #define GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL BIT(6) 154 + #define GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB BIT(3) 155 + #define GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB BIT(2) 156 + #define GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL BIT(1) 157 + #define GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL BIT(0) 158 + 159 + #define GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n) (0x23090 + (0x4000 * (n))) 160 + #define GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) (0x23094 + (0x4000 * (n))) 161 + 162 + /* Mask channel control interrupt register */ 163 + #define GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n) (0x23098 + (0x4000 * (n))) 164 + #define GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK GENMASK(1, 0) 165 + 166 + /* Mask event control interrupt register */ 167 + #define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (0x2309C + (0x4000 * (n))) 168 + #define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK BIT(0) 169 + 170 + #define GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n) (0x230A0 + (0x4000 * (n))) 171 + #define GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (0x230A4 + (0x4000 * (n))) 172 + 173 + /* Mask event interrupt register */ 174 + #define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) (0x230B8 + (0x4000 * (n))) 175 + #define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK BIT(0) 176 + 177 + #define GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) (0x230C0 + (0x4000 * (n))) 178 + #define GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) (0x23100 + (0x4000 * (n))) 179 + #define GPI_GLOB_IRQ_ERROR_INT_MSK BIT(0) 180 + 181 + /* GPII specific Global - Enable bit register */ 182 + #define GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n) (0x23108 + (0x4000 * (n))) 183 + #define 
GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) (0x23110 + (0x4000 * (n))) 184 + #define GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n) (0x23118 + (0x4000 * (n))) 185 + 186 + /* GPII general interrupt - Enable bit register */ 187 + #define GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n) (0x23120 + (0x4000 * (n))) 188 + #define GPII_n_CNTXT_GPII_IRQ_EN_BMSK GENMASK(3, 0) 189 + 190 + #define GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n) (0x23128 + (0x4000 * (n))) 191 + 192 + /* GPII Interrupt Type register */ 193 + #define GPII_n_CNTXT_INTSET_OFFS(n) (0x23180 + (0x4000 * (n))) 194 + #define GPII_n_CNTXT_INTSET_BMSK BIT(0) 195 + 196 + #define GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n) (0x23188 + (0x4000 * (n))) 197 + #define GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n) (0x2318C + (0x4000 * (n))) 198 + #define GPII_n_CNTXT_SCRATCH_0_OFFS(n) (0x23400 + (0x4000 * (n))) 199 + #define GPII_n_CNTXT_SCRATCH_1_OFFS(n) (0x23404 + (0x4000 * (n))) 200 + 201 + #define GPII_n_ERROR_LOG_OFFS(n) (0x23200 + (0x4000 * (n))) 202 + 203 + /* QOS Registers */ 204 + #define GPII_n_CH_k_QOS_OFFS(n, k) (0x2005C + (0x4000 * (n)) + (0x80 * (k))) 205 + 206 + /* Scratch registers */ 207 + #define GPII_n_CH_k_SCRATCH_0_OFFS(n, k) (0x20060 + (0x4000 * (n)) + (0x80 * (k))) 208 + #define GPII_n_CH_k_SCRATCH_0_SEID GENMASK(2, 0) 209 + #define GPII_n_CH_k_SCRATCH_0_PROTO GENMASK(7, 4) 210 + #define GPII_n_CH_k_SCRATCH_0_PAIR GENMASK(20, 16) 211 + #define GPII_n_CH_k_SCRATCH_0(pair, proto, seid) \ 212 + (FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PAIR, pair) | \ 213 + FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PROTO, proto) | \ 214 + FIELD_PREP(GPII_n_CH_k_SCRATCH_0_SEID, seid)) 215 + #define GPII_n_CH_k_SCRATCH_1_OFFS(n, k) (0x20064 + (0x4000 * (n)) + (0x80 * (k))) 216 + #define GPII_n_CH_k_SCRATCH_2_OFFS(n, k) (0x20068 + (0x4000 * (n)) + (0x80 * (k))) 217 + #define GPII_n_CH_k_SCRATCH_3_OFFS(n, k) (0x2006C + (0x4000 * (n)) + (0x80 * (k))) 218 + 219 + struct __packed gpi_tre { 220 + u32 dword[4]; 221 + }; 222 + 223 + enum msm_gpi_tce_code { 224 + MSM_GPI_TCE_SUCCESS = 1, 225 + MSM_GPI_TCE_EOT = 2, 226 + MSM_GPI_TCE_EOB = 4, 227 + MSM_GPI_TCE_UNEXP_ERR = 16, 228 + }; 229 + 230 + #define CMD_TIMEOUT_MS (250) 231 + 232 + #define MAX_CHANNELS_PER_GPII (2) 233 + #define GPI_TX_CHAN (0) 234 + #define GPI_RX_CHAN (1) 235 + #define STATE_IGNORE (U32_MAX) 236 + #define EV_FACTOR (2) 237 + #define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */ 238 + #define CHAN_TRES 64 239 + 240 + struct __packed xfer_compl_event { 241 + u64 ptr; 242 + u32 length:24; 243 + u8 code; 244 + u16 status; 245 + u8 type; 246 + u8 chid; 247 + }; 248 + 249 + struct __packed immediate_data_event { 250 + u8 data_bytes[8]; 251 + u8 length:4; 252 + u8 resvd:4; 253 + u16 tre_index; 254 + u8 code; 255 + u16 status; 256 + u8 type; 257 + u8 chid; 258 + }; 259 + 260 + struct __packed qup_notif_event { 261 + u32 status; 262 + u32 time; 263 + u32 count:24; 264 + u8 resvd; 265 + u16 resvd1; 266 + u8 type; 267 + u8 chid; 268 + }; 269 + 270 + struct __packed gpi_ere { 271 + u32 dword[4]; 272 + }; 273 + 274 + enum GPI_EV_TYPE { 275 + XFER_COMPLETE_EV_TYPE = 0x22, 276 + IMMEDIATE_DATA_EV_TYPE = 0x30, 277 + QUP_NOTIF_EV_TYPE = 0x31, 278 + STALE_EV_TYPE = 0xFF, 279 + }; 280 + 281 + union __packed gpi_event { 282 + struct __packed xfer_compl_event xfer_compl_event; 283 + struct __packed immediate_data_event immediate_data_event; 284 + struct __packed qup_notif_event qup_notif_event; 285 + struct __packed gpi_ere gpi_ere; 286 + }; 287 + 288 + enum gpii_irq_settings { 289 + DEFAULT_IRQ_SETTINGS, 290 + MASK_IEOB_SETTINGS, 291 + }; 292 + 293 + enum 
gpi_ev_state { 294 + DEFAULT_EV_CH_STATE = 0, 295 + EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE, 296 + EV_STATE_ALLOCATED, 297 + MAX_EV_STATES 298 + }; 299 + 300 + static const char *const gpi_ev_state_str[MAX_EV_STATES] = { 301 + [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED", 302 + [EV_STATE_ALLOCATED] = "ALLOCATED", 303 + }; 304 + 305 + #define TO_GPI_EV_STATE_STR(_state) (((_state) >= MAX_EV_STATES) ? \ 306 + "INVALID" : gpi_ev_state_str[(_state)]) 307 + 308 + enum gpi_ch_state { 309 + DEFAULT_CH_STATE = 0x0, 310 + CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE, 311 + CH_STATE_ALLOCATED = 0x1, 312 + CH_STATE_STARTED = 0x2, 313 + CH_STATE_STOPPED = 0x3, 314 + CH_STATE_STOP_IN_PROC = 0x4, 315 + CH_STATE_ERROR = 0xf, 316 + MAX_CH_STATES 317 + }; 318 + 319 + enum gpi_cmd { 320 + GPI_CH_CMD_BEGIN, 321 + GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN, 322 + GPI_CH_CMD_START, 323 + GPI_CH_CMD_STOP, 324 + GPI_CH_CMD_RESET, 325 + GPI_CH_CMD_DE_ALLOC, 326 + GPI_CH_CMD_UART_SW_STALE, 327 + GPI_CH_CMD_UART_RFR_READY, 328 + GPI_CH_CMD_UART_RFR_NOT_READY, 329 + GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY, 330 + GPI_EV_CMD_BEGIN, 331 + GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN, 332 + GPI_EV_CMD_RESET, 333 + GPI_EV_CMD_DEALLOC, 334 + GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC, 335 + GPI_MAX_CMD, 336 + }; 337 + 338 + #define IS_CHAN_CMD(_cmd) ((_cmd) <= GPI_CH_CMD_END) 339 + 340 + static const char *const gpi_cmd_str[GPI_MAX_CMD] = { 341 + [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE", 342 + [GPI_CH_CMD_START] = "CH START", 343 + [GPI_CH_CMD_STOP] = "CH STOP", 344 + [GPI_CH_CMD_RESET] = "CH_RESET", 345 + [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC", 346 + [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE", 347 + [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY", 348 + [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY", 349 + [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE", 350 + [GPI_EV_CMD_RESET] = "EV RESET", 351 + [GPI_EV_CMD_DEALLOC] = "EV DEALLOC", 352 + }; 353 + 354 + #define TO_GPI_CMD_STR(_cmd) (((_cmd) >= GPI_MAX_CMD) ? "INVALID" : \ 355 + gpi_cmd_str[(_cmd)]) 356 + 357 + /* 358 + * @DISABLE_STATE: no register access allowed 359 + * @CONFIG_STATE: client has configured the channel 360 + * @PREP_HARDWARE: register access is allowed 361 + * however, no processing EVENTS 362 + * @ACTIVE_STATE: channels are fully operational 363 + * @PREPARE_TERMINATE: graceful termination of channels 364 + * register access is allowed 365 + * @PAUSE_STATE: channels are active, but not processing any events 366 + */ 367 + enum gpi_pm_state { 368 + DISABLE_STATE, 369 + CONFIG_STATE, 370 + PREPARE_HARDWARE, 371 + ACTIVE_STATE, 372 + PREPARE_TERMINATE, 373 + PAUSE_STATE, 374 + MAX_PM_STATE 375 + }; 376 + 377 + #define REG_ACCESS_VALID(_pm_state) ((_pm_state) >= PREPARE_HARDWARE) 378 + 379 + static const char *const gpi_pm_state_str[MAX_PM_STATE] = { 380 + [DISABLE_STATE] = "DISABLE", 381 + [CONFIG_STATE] = "CONFIG", 382 + [PREPARE_HARDWARE] = "PREPARE HARDWARE", 383 + [ACTIVE_STATE] = "ACTIVE", 384 + [PREPARE_TERMINATE] = "PREPARE TERMINATE", 385 + [PAUSE_STATE] = "PAUSE", 386 + }; 387 + 388 + #define TO_GPI_PM_STR(_state) (((_state) >= MAX_PM_STATE) ? 
\ 389 + "INVALID" : gpi_pm_state_str[(_state)]) 390 + 391 + static const struct { 392 + enum gpi_cmd gpi_cmd; 393 + u32 opcode; 394 + u32 state; 395 + } gpi_cmd_info[GPI_MAX_CMD] = { 396 + { 397 + GPI_CH_CMD_ALLOCATE, 398 + GPII_n_CH_CMD_ALLOCATE, 399 + CH_STATE_ALLOCATED, 400 + }, 401 + { 402 + GPI_CH_CMD_START, 403 + GPII_n_CH_CMD_START, 404 + CH_STATE_STARTED, 405 + }, 406 + { 407 + GPI_CH_CMD_STOP, 408 + GPII_n_CH_CMD_STOP, 409 + CH_STATE_STOPPED, 410 + }, 411 + { 412 + GPI_CH_CMD_RESET, 413 + GPII_n_CH_CMD_RESET, 414 + CH_STATE_ALLOCATED, 415 + }, 416 + { 417 + GPI_CH_CMD_DE_ALLOC, 418 + GPII_n_CH_CMD_DE_ALLOC, 419 + CH_STATE_NOT_ALLOCATED, 420 + }, 421 + { 422 + GPI_CH_CMD_UART_SW_STALE, 423 + GPII_n_CH_CMD_UART_SW_STALE, 424 + STATE_IGNORE, 425 + }, 426 + { 427 + GPI_CH_CMD_UART_RFR_READY, 428 + GPII_n_CH_CMD_UART_RFR_READY, 429 + STATE_IGNORE, 430 + }, 431 + { 432 + GPI_CH_CMD_UART_RFR_NOT_READY, 433 + GPII_n_CH_CMD_UART_RFR_NOT_READY, 434 + STATE_IGNORE, 435 + }, 436 + { 437 + GPI_EV_CMD_ALLOCATE, 438 + GPII_n_EV_CH_CMD_ALLOCATE, 439 + EV_STATE_ALLOCATED, 440 + }, 441 + { 442 + GPI_EV_CMD_RESET, 443 + GPII_n_EV_CH_CMD_RESET, 444 + EV_STATE_ALLOCATED, 445 + }, 446 + { 447 + GPI_EV_CMD_DEALLOC, 448 + GPII_n_EV_CH_CMD_DE_ALLOC, 449 + EV_STATE_NOT_ALLOCATED, 450 + }, 451 + }; 452 + 453 + struct gpi_ring { 454 + void *pre_aligned; 455 + size_t alloc_size; 456 + phys_addr_t phys_addr; 457 + dma_addr_t dma_handle; 458 + void *base; 459 + void *wp; 460 + void *rp; 461 + u32 len; 462 + u32 el_size; 463 + u32 elements; 464 + bool configured; 465 + }; 466 + 467 + struct gpi_dev { 468 + struct dma_device dma_device; 469 + struct device *dev; 470 + struct resource *res; 471 + void __iomem *regs; 472 + void __iomem *ee_base; /*ee register base address*/ 473 + u32 max_gpii; /* maximum # of gpii instances available per gpi block */ 474 + u32 gpii_mask; /* gpii instances available for apps */ 475 + u32 ev_factor; /* ev ring length factor */ 476 + struct gpii *gpiis; 477 + }; 478 + 479 + struct reg_info { 480 + char *name; 481 + u32 offset; 482 + u32 val; 483 + }; 484 + 485 + struct gchan { 486 + struct virt_dma_chan vc; 487 + u32 chid; 488 + u32 seid; 489 + u32 protocol; 490 + struct gpii *gpii; 491 + enum gpi_ch_state ch_state; 492 + enum gpi_pm_state pm_state; 493 + void __iomem *ch_cntxt_base_reg; 494 + void __iomem *ch_cntxt_db_reg; 495 + void __iomem *ch_cmd_reg; 496 + u32 dir; 497 + struct gpi_ring ch_ring; 498 + void *config; 499 + }; 500 + 501 + struct gpii { 502 + u32 gpii_id; 503 + struct gchan gchan[MAX_CHANNELS_PER_GPII]; 504 + struct gpi_dev *gpi_dev; 505 + int irq; 506 + void __iomem *regs; /* points to gpi top */ 507 + void __iomem *ev_cntxt_base_reg; 508 + void __iomem *ev_cntxt_db_reg; 509 + void __iomem *ev_ring_rp_lsb_reg; 510 + void __iomem *ev_cmd_reg; 511 + void __iomem *ieob_clr_reg; 512 + struct mutex ctrl_lock; 513 + enum gpi_ev_state ev_state; 514 + bool configured_irq; 515 + enum gpi_pm_state pm_state; 516 + rwlock_t pm_lock; 517 + struct gpi_ring ev_ring; 518 + struct tasklet_struct ev_task; /* event processing tasklet */ 519 + struct completion cmd_completion; 520 + enum gpi_cmd gpi_cmd; 521 + u32 cntxt_type_irq_msk; 522 + bool ieob_set; 523 + }; 524 + 525 + #define MAX_TRE 3 526 + 527 + struct gpi_desc { 528 + struct virt_dma_desc vd; 529 + size_t len; 530 + void *db; /* DB register to program */ 531 + struct gchan *gchan; 532 + struct gpi_tre tre[MAX_TRE]; 533 + u32 num_tre; 534 + }; 535 + 536 + static const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = { 537 + 
GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN 538 + }; 539 + 540 + static irqreturn_t gpi_handle_irq(int irq, void *data); 541 + static void gpi_ring_recycle_ev_element(struct gpi_ring *ring); 542 + static int gpi_ring_add_element(struct gpi_ring *ring, void **wp); 543 + static void gpi_process_events(struct gpii *gpii); 544 + 545 + static inline struct gchan *to_gchan(struct dma_chan *dma_chan) 546 + { 547 + return container_of(dma_chan, struct gchan, vc.chan); 548 + } 549 + 550 + static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd) 551 + { 552 + return container_of(vd, struct gpi_desc, vd); 553 + } 554 + 555 + static inline phys_addr_t to_physical(const struct gpi_ring *const ring, 556 + void *addr) 557 + { 558 + return ring->phys_addr + (addr - ring->base); 559 + } 560 + 561 + static inline void *to_virtual(const struct gpi_ring *const ring, phys_addr_t addr) 562 + { 563 + return ring->base + (addr - ring->phys_addr); 564 + } 565 + 566 + static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr) 567 + { 568 + return readl_relaxed(addr); 569 + } 570 + 571 + static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val) 572 + { 573 + writel_relaxed(val, addr); 574 + } 575 + 576 + /* gpi_write_reg_field - write to specific bit field */ 577 + static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr, 578 + u32 mask, u32 shift, u32 val) 579 + { 580 + u32 tmp = gpi_read_reg(gpii, addr); 581 + 582 + tmp &= ~mask; 583 + val = tmp | ((val << shift) & mask); 584 + gpi_write_reg(gpii, addr, val); 585 + } 586 + 587 + static inline void 588 + gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val) 589 + { 590 + void __iomem *addr = gpii->regs + offset; 591 + u32 tmp = gpi_read_reg(gpii, addr); 592 + 593 + tmp &= ~mask; 594 + tmp |= u32_encode_bits(val, mask); 595 + 596 + gpi_write_reg(gpii, addr, tmp); 597 + } 598 + 599 + static void gpi_disable_interrupts(struct gpii *gpii) 600 + { 601 + gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id), 602 + GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, 0); 603 + gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id), 604 + GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK, 0); 605 + gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id), 606 + GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK, 0); 607 + gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id), 608 + GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK, 0); 609 + gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id), 610 + GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0); 611 + gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id), 612 + GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0); 613 + gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id), 614 + GPII_n_CNTXT_INTSET_BMSK, 0); 615 + 616 + gpii->cntxt_type_irq_msk = 0; 617 + devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii); 618 + gpii->configured_irq = false; 619 + } 620 + 621 + /* configure and enable interrupts */ 622 + static int gpi_config_interrupts(struct gpii *gpii, enum gpii_irq_settings settings, bool mask) 623 + { 624 + const u32 enable = (GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL | 625 + GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB | 626 + GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB | 627 + GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL | 628 + GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL); 629 + int ret; 630 + 631 + if (!gpii->configured_irq) { 632 + ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq, 633 + gpi_handle_irq, IRQF_TRIGGER_HIGH, 634 + "gpi-dma", gpii); 635 + if (ret < 0) { 636 + dev_err(gpii->gpi_dev->dev, 
"error request irq:%d ret:%d\n", 637 + gpii->irq, ret); 638 + return ret; 639 + } 640 + } 641 + 642 + if (settings == MASK_IEOB_SETTINGS) { 643 + /* 644 + * GPII only uses one EV ring per gpii so we can globally 645 + * enable/disable IEOB interrupt 646 + */ 647 + if (mask) 648 + gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB; 649 + else 650 + gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB); 651 + gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id), 652 + GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk); 653 + } else { 654 + gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id), 655 + GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, enable); 656 + gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id), 657 + GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK, 658 + GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK); 659 + gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id), 660 + GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK, 661 + GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK); 662 + gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id), 663 + GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK, 664 + GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK); 665 + gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id), 666 + GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 667 + GPII_n_CNTXT_GPII_IRQ_EN_BMSK); 668 + gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id), 669 + GPII_n_CNTXT_GPII_IRQ_EN_BMSK, GPII_n_CNTXT_GPII_IRQ_EN_BMSK); 670 + gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii->gpii_id), U32_MAX, 0); 671 + gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii->gpii_id), U32_MAX, 0); 672 + gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id), U32_MAX, 0); 673 + gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii->gpii_id), U32_MAX, 0); 674 + gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id), 675 + GPII_n_CNTXT_INTSET_BMSK, 1); 676 + gpi_update_reg(gpii, GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), U32_MAX, 0); 677 + 678 + gpii->cntxt_type_irq_msk = enable; 679 + } 680 + 681 + gpii->configured_irq = true; 682 + return 0; 683 + } 684 + 685 + /* Sends gpii event or channel command */ 686 + static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan, 687 + enum gpi_cmd gpi_cmd) 688 + { 689 + u32 chid = MAX_CHANNELS_PER_GPII; 690 + unsigned long timeout; 691 + void __iomem *cmd_reg; 692 + u32 cmd; 693 + 694 + if (gpi_cmd >= GPI_MAX_CMD) 695 + return -EINVAL; 696 + if (IS_CHAN_CMD(gpi_cmd)) 697 + chid = gchan->chid; 698 + 699 + dev_dbg(gpii->gpi_dev->dev, 700 + "sending cmd: %s:%u\n", TO_GPI_CMD_STR(gpi_cmd), chid); 701 + 702 + /* send opcode and wait for completion */ 703 + reinit_completion(&gpii->cmd_completion); 704 + gpii->gpi_cmd = gpi_cmd; 705 + 706 + cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg; 707 + cmd = IS_CHAN_CMD(gpi_cmd) ? 
GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) : 708 + GPII_n_EV_CMD(gpi_cmd_info[gpi_cmd].opcode, 0); 709 + gpi_write_reg(gpii, cmd_reg, cmd); 710 + timeout = wait_for_completion_timeout(&gpii->cmd_completion, 711 + msecs_to_jiffies(CMD_TIMEOUT_MS)); 712 + if (!timeout) { 713 + dev_err(gpii->gpi_dev->dev, "cmd: %s completion timeout:%u\n", 714 + TO_GPI_CMD_STR(gpi_cmd), chid); 715 + return -EIO; 716 + } 717 + 718 + /* confirm new ch state is correct , if the cmd is a state change cmd */ 719 + if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE) 720 + return 0; 721 + 722 + if (IS_CHAN_CMD(gpi_cmd) && gchan->ch_state == gpi_cmd_info[gpi_cmd].state) 723 + return 0; 724 + 725 + if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state) 726 + return 0; 727 + 728 + return -EIO; 729 + } 730 + 731 + /* program transfer ring DB register */ 732 + static inline void gpi_write_ch_db(struct gchan *gchan, 733 + struct gpi_ring *ring, void *wp) 734 + { 735 + struct gpii *gpii = gchan->gpii; 736 + phys_addr_t p_wp; 737 + 738 + p_wp = to_physical(ring, wp); 739 + gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp); 740 + } 741 + 742 + /* program event ring DB register */ 743 + static inline void gpi_write_ev_db(struct gpii *gpii, 744 + struct gpi_ring *ring, void *wp) 745 + { 746 + phys_addr_t p_wp; 747 + 748 + p_wp = ring->phys_addr + (wp - ring->base); 749 + gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, p_wp); 750 + } 751 + 752 + /* process transfer completion interrupt */ 753 + static void gpi_process_ieob(struct gpii *gpii) 754 + { 755 + gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0)); 756 + 757 + gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0); 758 + tasklet_hi_schedule(&gpii->ev_task); 759 + } 760 + 761 + /* process channel control interrupt */ 762 + static void gpi_process_ch_ctrl_irq(struct gpii *gpii) 763 + { 764 + u32 gpii_id = gpii->gpii_id; 765 + u32 offset = GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id); 766 + u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset); 767 + struct gchan *gchan; 768 + u32 chid, state; 769 + 770 + /* clear the status */ 771 + offset = GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id); 772 + gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq); 773 + 774 + for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) { 775 + if (!(BIT(chid) & ch_irq)) 776 + continue; 777 + 778 + gchan = &gpii->gchan[chid]; 779 + state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg + 780 + CNTXT_0_CONFIG); 781 + state = FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE, state); 782 + 783 + /* 784 + * CH_CMD_DEALLOC cmd always successful. However cmd does 785 + * not change hardware status. So overwriting software state 786 + * to default state. 787 + */ 788 + if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC) 789 + state = DEFAULT_CH_STATE; 790 + gchan->ch_state = state; 791 + 792 + /* 793 + * Triggering complete all if ch_state is not a stop in process. 794 + * Stop in process is a transition state and we will wait for 795 + * stop interrupt before notifying. 
796 + */ 797 + if (gchan->ch_state != CH_STATE_STOP_IN_PROC) 798 + complete_all(&gpii->cmd_completion); 799 + } 800 + } 801 + 802 + /* processing gpi general error interrupts */ 803 + static void gpi_process_gen_err_irq(struct gpii *gpii) 804 + { 805 + u32 gpii_id = gpii->gpii_id; 806 + u32 offset = GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id); 807 + u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset); 808 + 809 + /* clear the status */ 810 + dev_dbg(gpii->gpi_dev->dev, "irq_stts:0x%x\n", irq_stts); 811 + 812 + /* Clear the register */ 813 + offset = GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id); 814 + gpi_write_reg(gpii, gpii->regs + offset, irq_stts); 815 + } 816 + 817 + /* processing gpi level error interrupts */ 818 + static void gpi_process_glob_err_irq(struct gpii *gpii) 819 + { 820 + u32 gpii_id = gpii->gpii_id; 821 + u32 offset = GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id); 822 + u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset); 823 + 824 + offset = GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id); 825 + gpi_write_reg(gpii, gpii->regs + offset, irq_stts); 826 + 827 + /* only error interrupt should be set */ 828 + if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) { 829 + dev_err(gpii->gpi_dev->dev, "invalid error status:0x%x\n", irq_stts); 830 + return; 831 + } 832 + 833 + offset = GPII_n_ERROR_LOG_OFFS(gpii_id); 834 + gpi_write_reg(gpii, gpii->regs + offset, 0); 835 + } 836 + 837 + /* gpii interrupt handler */ 838 + static irqreturn_t gpi_handle_irq(int irq, void *data) 839 + { 840 + struct gpii *gpii = data; 841 + u32 gpii_id = gpii->gpii_id; 842 + u32 type, offset; 843 + unsigned long flags; 844 + 845 + read_lock_irqsave(&gpii->pm_lock, flags); 846 + 847 + /* 848 + * States are out of sync to receive interrupt 849 + * while software state is in DISABLE state, bailing out. 850 + */ 851 + if (!REG_ACCESS_VALID(gpii->pm_state)) { 852 + dev_err(gpii->gpi_dev->dev, "receive interrupt while in %s state\n", 853 + TO_GPI_PM_STR(gpii->pm_state)); 854 + goto exit_irq; 855 + } 856 + 857 + offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id); 858 + type = gpi_read_reg(gpii, gpii->regs + offset); 859 + 860 + do { 861 + /* global gpii error */ 862 + if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) { 863 + gpi_process_glob_err_irq(gpii); 864 + type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB); 865 + } 866 + 867 + /* transfer complete interrupt */ 868 + if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) { 869 + gpi_process_ieob(gpii); 870 + type &= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB; 871 + } 872 + 873 + /* event control irq */ 874 + if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) { 875 + u32 ev_state; 876 + u32 ev_ch_irq; 877 + 878 + dev_dbg(gpii->gpi_dev->dev, 879 + "processing EV CTRL interrupt\n"); 880 + offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id); 881 + ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset); 882 + 883 + offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS 884 + (gpii_id); 885 + gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq); 886 + ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg + 887 + CNTXT_0_CONFIG); 888 + ev_state = FIELD_GET(GPII_n_EV_k_CNTXT_0_CHSTATE, ev_state); 889 + 890 + /* 891 + * CMD EV_CMD_DEALLOC is always successful. However 892 + * cmd does not change hardware status. So overwriting 893 + * software state to default state. 
894 + */ 895 + if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC) 896 + ev_state = DEFAULT_EV_CH_STATE; 897 + 898 + gpii->ev_state = ev_state; 899 + dev_dbg(gpii->gpi_dev->dev, "setting EV state to %s\n", 900 + TO_GPI_EV_STATE_STR(gpii->ev_state)); 901 + complete_all(&gpii->cmd_completion); 902 + type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL); 903 + } 904 + 905 + /* channel control irq */ 906 + if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) { 907 + dev_dbg(gpii->gpi_dev->dev, "process CH CTRL interrupts\n"); 908 + gpi_process_ch_ctrl_irq(gpii); 909 + type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL); 910 + } 911 + 912 + if (type) { 913 + dev_err(gpii->gpi_dev->dev, "Unhandled interrupt status:0x%x\n", type); 914 + gpi_process_gen_err_irq(gpii); 915 + goto exit_irq; 916 + } 917 + 918 + offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id); 919 + type = gpi_read_reg(gpii, gpii->regs + offset); 920 + } while (type); 921 + 922 + exit_irq: 923 + read_unlock_irqrestore(&gpii->pm_lock, flags); 924 + 925 + return IRQ_HANDLED; 926 + } 927 + 928 + /* process DMA Immediate completion data events */ 929 + static void gpi_process_imed_data_event(struct gchan *gchan, 930 + struct immediate_data_event *imed_event) 931 + { 932 + struct gpii *gpii = gchan->gpii; 933 + struct gpi_ring *ch_ring = &gchan->ch_ring; 934 + void *tre = ch_ring->base + (ch_ring->el_size * imed_event->tre_index); 935 + struct dmaengine_result result; 936 + struct gpi_desc *gpi_desc; 937 + struct virt_dma_desc *vd; 938 + unsigned long flags; 939 + u32 chid; 940 + 941 + /* 942 + * If channel not active don't process event 943 + */ 944 + if (gchan->pm_state != ACTIVE_STATE) { 945 + dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n", 946 + TO_GPI_PM_STR(gchan->pm_state)); 947 + return; 948 + } 949 + 950 + spin_lock_irqsave(&gchan->vc.lock, flags); 951 + vd = vchan_next_desc(&gchan->vc); 952 + if (!vd) { 953 + struct gpi_ere *gpi_ere; 954 + struct gpi_tre *gpi_tre; 955 + 956 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 957 + dev_dbg(gpii->gpi_dev->dev, "event without a pending descriptor!\n"); 958 + gpi_ere = (struct gpi_ere *)imed_event; 959 + dev_dbg(gpii->gpi_dev->dev, 960 + "Event: %08x %08x %08x %08x\n", 961 + gpi_ere->dword[0], gpi_ere->dword[1], 962 + gpi_ere->dword[2], gpi_ere->dword[3]); 963 + gpi_tre = tre; 964 + dev_dbg(gpii->gpi_dev->dev, 965 + "Pending TRE: %08x %08x %08x %08x\n", 966 + gpi_tre->dword[0], gpi_tre->dword[1], 967 + gpi_tre->dword[2], gpi_tre->dword[3]); 968 + return; 969 + } 970 + gpi_desc = to_gpi_desc(vd); 971 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 972 + 973 + /* 974 + * RP pointed by Event is to last TRE processed, 975 + * we need to update ring rp to tre + 1 976 + */ 977 + tre += ch_ring->el_size; 978 + if (tre >= (ch_ring->base + ch_ring->len)) 979 + tre = ch_ring->base; 980 + ch_ring->rp = tre; 981 + 982 + /* make sure rp updates are immediately visible to all cores */ 983 + smp_wmb(); 984 + 985 + chid = imed_event->chid; 986 + if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) { 987 + if (chid == GPI_RX_CHAN) 988 + goto gpi_free_desc; 989 + else 990 + return; 991 + } 992 + 993 + if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR) 994 + result.result = DMA_TRANS_ABORTED; 995 + else 996 + result.result = DMA_TRANS_NOERROR; 997 + result.residue = gpi_desc->len - imed_event->length; 998 + 999 + dma_cookie_complete(&vd->tx); 1000 + dmaengine_desc_get_callback_invoke(&vd->tx, &result); 1001 + 1002 + gpi_free_desc: 1003 + spin_lock_irqsave(&gchan->vc.lock, flags); 1004 + 
list_del(&vd->node); 1005 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 1006 + kfree(gpi_desc); 1007 + gpi_desc = NULL; 1008 + } 1009 + 1010 + /* processing transfer completion events */ 1011 + static void gpi_process_xfer_compl_event(struct gchan *gchan, 1012 + struct xfer_compl_event *compl_event) 1013 + { 1014 + struct gpii *gpii = gchan->gpii; 1015 + struct gpi_ring *ch_ring = &gchan->ch_ring; 1016 + void *ev_rp = to_virtual(ch_ring, compl_event->ptr); 1017 + struct virt_dma_desc *vd; 1018 + struct gpi_desc *gpi_desc; 1019 + struct dmaengine_result result; 1020 + unsigned long flags; 1021 + u32 chid; 1022 + 1023 + /* only process events on active channel */ 1024 + if (unlikely(gchan->pm_state != ACTIVE_STATE)) { 1025 + dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n", 1026 + TO_GPI_PM_STR(gchan->pm_state)); 1027 + return; 1028 + } 1029 + 1030 + spin_lock_irqsave(&gchan->vc.lock, flags); 1031 + vd = vchan_next_desc(&gchan->vc); 1032 + if (!vd) { 1033 + struct gpi_ere *gpi_ere; 1034 + 1035 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 1036 + dev_err(gpii->gpi_dev->dev, "Event without a pending descriptor!\n"); 1037 + gpi_ere = (struct gpi_ere *)compl_event; 1038 + dev_err(gpii->gpi_dev->dev, 1039 + "Event: %08x %08x %08x %08x\n", 1040 + gpi_ere->dword[0], gpi_ere->dword[1], 1041 + gpi_ere->dword[2], gpi_ere->dword[3]); 1042 + return; 1043 + } 1044 + 1045 + gpi_desc = to_gpi_desc(vd); 1046 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 1047 + 1048 + /* 1049 + * RP pointed by Event is to last TRE processed, 1050 + * we need to update ring rp to ev_rp + 1 1051 + */ 1052 + ev_rp += ch_ring->el_size; 1053 + if (ev_rp >= (ch_ring->base + ch_ring->len)) 1054 + ev_rp = ch_ring->base; 1055 + ch_ring->rp = ev_rp; 1056 + 1057 + /* update must be visible to other cores */ 1058 + smp_wmb(); 1059 + 1060 + chid = compl_event->chid; 1061 + if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) { 1062 + if (chid == GPI_RX_CHAN) 1063 + goto gpi_free_desc; 1064 + else 1065 + return; 1066 + } 1067 + 1068 + if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR) { 1069 + dev_err(gpii->gpi_dev->dev, "Error in Transaction\n"); 1070 + result.result = DMA_TRANS_ABORTED; 1071 + } else { 1072 + dev_dbg(gpii->gpi_dev->dev, "Transaction Success\n"); 1073 + result.result = DMA_TRANS_NOERROR; 1074 + } 1075 + result.residue = gpi_desc->len - compl_event->length; 1076 + dev_dbg(gpii->gpi_dev->dev, "Residue %d\n", result.residue); 1077 + 1078 + dma_cookie_complete(&vd->tx); 1079 + dmaengine_desc_get_callback_invoke(&vd->tx, &result); 1080 + 1081 + gpi_free_desc: 1082 + spin_lock_irqsave(&gchan->vc.lock, flags); 1083 + list_del(&vd->node); 1084 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 1085 + kfree(gpi_desc); 1086 + gpi_desc = NULL; 1087 + } 1088 + 1089 + /* process all events */ 1090 + static void gpi_process_events(struct gpii *gpii) 1091 + { 1092 + struct gpi_ring *ev_ring = &gpii->ev_ring; 1093 + phys_addr_t cntxt_rp; 1094 + void *rp; 1095 + union gpi_event *gpi_event; 1096 + struct gchan *gchan; 1097 + u32 chid, type; 1098 + 1099 + cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg); 1100 + rp = to_virtual(ev_ring, cntxt_rp); 1101 + 1102 + do { 1103 + while (rp != ev_ring->rp) { 1104 + gpi_event = ev_ring->rp; 1105 + chid = gpi_event->xfer_compl_event.chid; 1106 + type = gpi_event->xfer_compl_event.type; 1107 + 1108 + dev_dbg(gpii->gpi_dev->dev, 1109 + "Event: CHID:%u, type:%x %08x %08x %08x %08x\n", 1110 + chid, type, gpi_event->gpi_ere.dword[0], 1111 + 
gpi_event->gpi_ere.dword[1], gpi_event->gpi_ere.dword[2], 1112 + gpi_event->gpi_ere.dword[3]); 1113 + 1114 + switch (type) { 1115 + case XFER_COMPLETE_EV_TYPE: 1116 + gchan = &gpii->gchan[chid]; 1117 + gpi_process_xfer_compl_event(gchan, 1118 + &gpi_event->xfer_compl_event); 1119 + break; 1120 + case STALE_EV_TYPE: 1121 + dev_dbg(gpii->gpi_dev->dev, "stale event, not processing\n"); 1122 + break; 1123 + case IMMEDIATE_DATA_EV_TYPE: 1124 + gchan = &gpii->gchan[chid]; 1125 + gpi_process_imed_data_event(gchan, 1126 + &gpi_event->immediate_data_event); 1127 + break; 1128 + case QUP_NOTIF_EV_TYPE: 1129 + dev_dbg(gpii->gpi_dev->dev, "QUP_NOTIF_EV_TYPE\n"); 1130 + break; 1131 + default: 1132 + dev_dbg(gpii->gpi_dev->dev, 1133 + "not supported event type:0x%x\n", type); 1134 + } 1135 + gpi_ring_recycle_ev_element(ev_ring); 1136 + } 1137 + gpi_write_ev_db(gpii, ev_ring, ev_ring->wp); 1138 + 1139 + /* clear pending IEOB events */ 1140 + gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0)); 1141 + 1142 + cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg); 1143 + rp = to_virtual(ev_ring, cntxt_rp); 1144 + 1145 + } while (rp != ev_ring->rp); 1146 + } 1147 + 1148 + /* processing events using tasklet */ 1149 + static void gpi_ev_tasklet(unsigned long data) 1150 + { 1151 + struct gpii *gpii = (struct gpii *)data; 1152 + 1153 + read_lock_bh(&gpii->pm_lock); 1154 + if (!REG_ACCESS_VALID(gpii->pm_state)) { 1155 + read_unlock_bh(&gpii->pm_lock); 1156 + dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n", 1157 + TO_GPI_PM_STR(gpii->pm_state)); 1158 + return; 1159 + } 1160 + 1161 + /* process the events */ 1162 + gpi_process_events(gpii); 1163 + 1164 + /* enable IEOB, switching back to interrupts */ 1165 + gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1); 1166 + read_unlock_bh(&gpii->pm_lock); 1167 + } 1168 + 1169 + /* marks all pending events for the channel as stale */ 1170 + static void gpi_mark_stale_events(struct gchan *gchan) 1171 + { 1172 + struct gpii *gpii = gchan->gpii; 1173 + struct gpi_ring *ev_ring = &gpii->ev_ring; 1174 + u32 cntxt_rp, local_rp; 1175 + void *ev_rp; 1176 + 1177 + cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg); 1178 + 1179 + ev_rp = ev_ring->rp; 1180 + local_rp = (u32)to_physical(ev_ring, ev_rp); 1181 + while (local_rp != cntxt_rp) { 1182 + union gpi_event *gpi_event = ev_rp; 1183 + u32 chid = gpi_event->xfer_compl_event.chid; 1184 + 1185 + if (chid == gchan->chid) 1186 + gpi_event->xfer_compl_event.type = STALE_EV_TYPE; 1187 + ev_rp += ev_ring->el_size; 1188 + if (ev_rp >= (ev_ring->base + ev_ring->len)) 1189 + ev_rp = ev_ring->base; 1190 + cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg); 1191 + local_rp = (u32)to_physical(ev_ring, ev_rp); 1192 + } 1193 + } 1194 + 1195 + /* reset sw state and issue channel reset or de-alloc */ 1196 + static int gpi_reset_chan(struct gchan *gchan, enum gpi_cmd gpi_cmd) 1197 + { 1198 + struct gpii *gpii = gchan->gpii; 1199 + struct gpi_ring *ch_ring = &gchan->ch_ring; 1200 + unsigned long flags; 1201 + LIST_HEAD(list); 1202 + int ret; 1203 + 1204 + ret = gpi_send_cmd(gpii, gchan, gpi_cmd); 1205 + if (ret) { 1206 + dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n", 1207 + TO_GPI_CMD_STR(gpi_cmd), ret); 1208 + return ret; 1209 + } 1210 + 1211 + /* initialize the local ring ptrs */ 1212 + ch_ring->rp = ch_ring->base; 1213 + ch_ring->wp = ch_ring->base; 1214 + 1215 + /* visible to other cores */ 1216 + smp_wmb(); 1217 + 1218 + /* check event ring for any stale events */ 1219 + write_lock_irq(&gpii->pm_lock); 
1220 + gpi_mark_stale_events(gchan); 1221 + 1222 + /* remove all async descriptors */ 1223 + spin_lock_irqsave(&gchan->vc.lock, flags); 1224 + vchan_get_all_descriptors(&gchan->vc, &list); 1225 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 1226 + write_unlock_irq(&gpii->pm_lock); 1227 + vchan_dma_desc_free_list(&gchan->vc, &list); 1228 + 1229 + return 0; 1230 + } 1231 + 1232 + static int gpi_start_chan(struct gchan *gchan) 1233 + { 1234 + struct gpii *gpii = gchan->gpii; 1235 + int ret; 1236 + 1237 + ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START); 1238 + if (ret) { 1239 + dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n", 1240 + TO_GPI_CMD_STR(GPI_CH_CMD_START), ret); 1241 + return ret; 1242 + } 1243 + 1244 + /* gpii CH is active now */ 1245 + write_lock_irq(&gpii->pm_lock); 1246 + gchan->pm_state = ACTIVE_STATE; 1247 + write_unlock_irq(&gpii->pm_lock); 1248 + 1249 + return 0; 1250 + } 1251 + 1252 + static int gpi_stop_chan(struct gchan *gchan) 1253 + { 1254 + struct gpii *gpii = gchan->gpii; 1255 + int ret; 1256 + 1257 + ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_STOP); 1258 + if (ret) { 1259 + dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n", 1260 + TO_GPI_CMD_STR(GPI_CH_CMD_STOP), ret); 1261 + return ret; 1262 + } 1263 + 1264 + return 0; 1265 + } 1266 + 1267 + /* allocate and configure the transfer channel */ 1268 + static int gpi_alloc_chan(struct gchan *chan, bool send_alloc_cmd) 1269 + { 1270 + struct gpii *gpii = chan->gpii; 1271 + struct gpi_ring *ring = &chan->ch_ring; 1272 + int ret; 1273 + u32 id = gpii->gpii_id; 1274 + u32 chid = chan->chid; 1275 + u32 pair_chid = !chid; 1276 + 1277 + if (send_alloc_cmd) { 1278 + ret = gpi_send_cmd(gpii, chan, GPI_CH_CMD_ALLOCATE); 1279 + if (ret) { 1280 + dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n", 1281 + TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret); 1282 + return ret; 1283 + } 1284 + } 1285 + 1286 + gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_0_CONFIG, 1287 + GPII_n_CH_k_CNTXT_0(ring->el_size, 0, chan->dir, GPI_CHTYPE_PROTO_GPI)); 1288 + gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_1_R_LENGTH, ring->len); 1289 + gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_2_RING_BASE_LSB, ring->phys_addr); 1290 + gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_3_RING_BASE_MSB, 1291 + upper_32_bits(ring->phys_addr)); 1292 + gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB, 1293 + upper_32_bits(ring->phys_addr)); 1294 + gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid), 1295 + GPII_n_CH_k_SCRATCH_0(pair_chid, chan->protocol, chan->seid)); 1296 + gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0); 1297 + gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0); 1298 + gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0); 1299 + gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_QOS_OFFS(id, chid), 1); 1300 + 1301 + /* flush all the writes */ 1302 + wmb(); 1303 + return 0; 1304 + } 1305 + 1306 + /* allocate and configure event ring */ 1307 + static int gpi_alloc_ev_chan(struct gpii *gpii) 1308 + { 1309 + struct gpi_ring *ring = &gpii->ev_ring; 1310 + void __iomem *base = gpii->ev_cntxt_base_reg; 1311 + int ret; 1312 + 1313 + ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE); 1314 + if (ret) { 1315 + dev_err(gpii->gpi_dev->dev, "error with cmd:%s ret:%d\n", 1316 + TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret); 1317 + return ret; 1318 + } 1319 + 1320 + /* program event context */ 1321 + 
gpi_write_reg(gpii, base + CNTXT_0_CONFIG, 1322 + GPII_n_EV_k_CNTXT_0(ring->el_size, GPI_INTTYPE_IRQ, GPI_CHTYPE_GPI_EV)); 1323 + gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len); 1324 + gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB, lower_32_bits(ring->phys_addr)); 1325 + gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB, upper_32_bits(ring->phys_addr)); 1326 + gpi_write_reg(gpii, gpii->ev_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB, 1327 + upper_32_bits(ring->phys_addr)); 1328 + gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0); 1329 + gpi_write_reg(gpii, base + CNTXT_10_RING_MSI_LSB, 0); 1330 + gpi_write_reg(gpii, base + CNTXT_11_RING_MSI_MSB, 0); 1331 + gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0); 1332 + gpi_write_reg(gpii, base + CNTXT_12_RING_RP_UPDATE_LSB, 0); 1333 + gpi_write_reg(gpii, base + CNTXT_13_RING_RP_UPDATE_MSB, 0); 1334 + 1335 + /* add events to ring */ 1336 + ring->wp = (ring->base + ring->len - ring->el_size); 1337 + 1338 + /* flush all the writes */ 1339 + wmb(); 1340 + 1341 + /* gpii is active now */ 1342 + write_lock_irq(&gpii->pm_lock); 1343 + gpii->pm_state = ACTIVE_STATE; 1344 + write_unlock_irq(&gpii->pm_lock); 1345 + gpi_write_ev_db(gpii, ring, ring->wp); 1346 + 1347 + return 0; 1348 + } 1349 + 1350 + /* calculate # of ERE/TRE available to queue */ 1351 + static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring) 1352 + { 1353 + int elements = 0; 1354 + 1355 + if (ring->wp < ring->rp) { 1356 + elements = ((ring->rp - ring->wp) / ring->el_size) - 1; 1357 + } else { 1358 + elements = (ring->rp - ring->base) / ring->el_size; 1359 + elements += ((ring->base + ring->len - ring->wp) / ring->el_size) - 1; 1360 + } 1361 + 1362 + return elements; 1363 + } 1364 + 1365 + static int gpi_ring_add_element(struct gpi_ring *ring, void **wp) 1366 + { 1367 + if (gpi_ring_num_elements_avail(ring) <= 0) 1368 + return -ENOMEM; 1369 + 1370 + *wp = ring->wp; 1371 + ring->wp += ring->el_size; 1372 + if (ring->wp >= (ring->base + ring->len)) 1373 + ring->wp = ring->base; 1374 + 1375 + /* visible to other cores */ 1376 + smp_wmb(); 1377 + 1378 + return 0; 1379 + } 1380 + 1381 + static void gpi_ring_recycle_ev_element(struct gpi_ring *ring) 1382 + { 1383 + /* Update the WP */ 1384 + ring->wp += ring->el_size; 1385 + if (ring->wp >= (ring->base + ring->len)) 1386 + ring->wp = ring->base; 1387 + 1388 + /* Update the RP */ 1389 + ring->rp += ring->el_size; 1390 + if (ring->rp >= (ring->base + ring->len)) 1391 + ring->rp = ring->base; 1392 + 1393 + /* visible to other cores */ 1394 + smp_wmb(); 1395 + } 1396 + 1397 + static void gpi_free_ring(struct gpi_ring *ring, 1398 + struct gpii *gpii) 1399 + { 1400 + dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size, 1401 + ring->pre_aligned, ring->dma_handle); 1402 + memset(ring, 0, sizeof(*ring)); 1403 + } 1404 + 1405 + /* allocate memory for transfer and event rings */ 1406 + static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements, 1407 + u32 el_size, struct gpii *gpii) 1408 + { 1409 + u64 len = elements * el_size; 1410 + int bit; 1411 + 1412 + /* ring len must be power of 2 */ 1413 + bit = find_last_bit((unsigned long *)&len, 32); 1414 + if (((1 << bit) - 1) & len) 1415 + bit++; 1416 + len = 1 << bit; 1417 + ring->alloc_size = (len + (len - 1)); 1418 + dev_dbg(gpii->gpi_dev->dev, 1419 + "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n", 1420 + elements, el_size, (elements * el_size), len, 1421 + ring->alloc_size); 1422 + 1423 + ring->pre_aligned = 
dma_alloc_coherent(gpii->gpi_dev->dev, 1424 + ring->alloc_size, 1425 + &ring->dma_handle, GFP_KERNEL); 1426 + if (!ring->pre_aligned) { 1427 + dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n", 1428 + ring->alloc_size); 1429 + return -ENOMEM; 1430 + } 1431 + 1432 + /* align the physical mem */ 1433 + ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1); 1434 + ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle); 1435 + ring->rp = ring->base; 1436 + ring->wp = ring->base; 1437 + ring->len = len; 1438 + ring->el_size = el_size; 1439 + ring->elements = ring->len / ring->el_size; 1440 + memset(ring->base, 0, ring->len); 1441 + ring->configured = true; 1442 + 1443 + /* update to other cores */ 1444 + smp_wmb(); 1445 + 1446 + dev_dbg(gpii->gpi_dev->dev, 1447 + "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n", 1448 + ring->dma_handle, ring->phys_addr, ring->len, 1449 + ring->el_size, ring->elements); 1450 + 1451 + return 0; 1452 + } 1453 + 1454 + /* copy tre into transfer ring */ 1455 + static void gpi_queue_xfer(struct gpii *gpii, struct gchan *gchan, 1456 + struct gpi_tre *gpi_tre, void **wp) 1457 + { 1458 + struct gpi_tre *ch_tre; 1459 + int ret; 1460 + 1461 + /* get next tre location we can copy */ 1462 + ret = gpi_ring_add_element(&gchan->ch_ring, (void **)&ch_tre); 1463 + if (unlikely(ret)) { 1464 + dev_err(gpii->gpi_dev->dev, "Error adding ring element to xfer ring\n"); 1465 + return; 1466 + } 1467 + 1468 + /* copy the tre info */ 1469 + memcpy(ch_tre, gpi_tre, sizeof(*ch_tre)); 1470 + *wp = ch_tre; 1471 + } 1472 + 1473 + /* reset and restart transfer channel */ 1474 + static int gpi_terminate_all(struct dma_chan *chan) 1475 + { 1476 + struct gchan *gchan = to_gchan(chan); 1477 + struct gpii *gpii = gchan->gpii; 1478 + int schid, echid, i; 1479 + int ret = 0; 1480 + 1481 + mutex_lock(&gpii->ctrl_lock); 1482 + 1483 + /* 1484 + * treat both channels as a group if its protocol is not UART 1485 + * STOP, RESET, or START needs to be in lockstep 1486 + */ 1487 + schid = (gchan->protocol == QCOM_GPI_UART) ? gchan->chid : 0; 1488 + echid = (gchan->protocol == QCOM_GPI_UART) ? 
schid + 1 : MAX_CHANNELS_PER_GPII; 1489 + 1490 + /* stop the channel */ 1491 + for (i = schid; i < echid; i++) { 1492 + gchan = &gpii->gchan[i]; 1493 + 1494 + /* disable ch state so no more TRE processing */ 1495 + write_lock_irq(&gpii->pm_lock); 1496 + gchan->pm_state = PREPARE_TERMINATE; 1497 + write_unlock_irq(&gpii->pm_lock); 1498 + 1499 + /* send command to Stop the channel */ 1500 + ret = gpi_stop_chan(gchan); 1501 + } 1502 + 1503 + /* reset the channels (clears any pending tre) */ 1504 + for (i = schid; i < echid; i++) { 1505 + gchan = &gpii->gchan[i]; 1506 + 1507 + ret = gpi_reset_chan(gchan, GPI_CH_CMD_RESET); 1508 + if (ret) { 1509 + dev_err(gpii->gpi_dev->dev, "Error resetting channel ret:%d\n", ret); 1510 + goto terminate_exit; 1511 + } 1512 + 1513 + /* reprogram channel CNTXT */ 1514 + ret = gpi_alloc_chan(gchan, false); 1515 + if (ret) { 1516 + dev_err(gpii->gpi_dev->dev, "Error alloc_channel ret:%d\n", ret); 1517 + goto terminate_exit; 1518 + } 1519 + } 1520 + 1521 + /* restart the channels */ 1522 + for (i = schid; i < echid; i++) { 1523 + gchan = &gpii->gchan[i]; 1524 + 1525 + ret = gpi_start_chan(gchan); 1526 + if (ret) { 1527 + dev_err(gpii->gpi_dev->dev, "Error Starting Channel ret:%d\n", ret); 1528 + goto terminate_exit; 1529 + } 1530 + } 1531 + 1532 + terminate_exit: 1533 + mutex_unlock(&gpii->ctrl_lock); 1534 + return ret; 1535 + } 1536 + 1537 + /* pause dma transfer for all channels */ 1538 + static int gpi_pause(struct dma_chan *chan) 1539 + { 1540 + struct gchan *gchan = to_gchan(chan); 1541 + struct gpii *gpii = gchan->gpii; 1542 + int i, ret; 1543 + 1544 + mutex_lock(&gpii->ctrl_lock); 1545 + 1546 + /* 1547 + * pause/resume are per gpii not per channel, so 1548 + * client needs to call pause only once 1549 + */ 1550 + if (gpii->pm_state == PAUSE_STATE) { 1551 + dev_dbg(gpii->gpi_dev->dev, "channel is already paused\n"); 1552 + mutex_unlock(&gpii->ctrl_lock); 1553 + return 0; 1554 + } 1555 + 1556 + /* send stop command to stop the channels */ 1557 + for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { 1558 + ret = gpi_stop_chan(&gpii->gchan[i]); 1559 + if (ret) { 1560 + mutex_unlock(&gpii->ctrl_lock); 1561 + return ret; 1562 + } 1563 + } 1564 + 1565 + disable_irq(gpii->irq); 1566 + 1567 + /* Wait for threads to complete out */ 1568 + tasklet_kill(&gpii->ev_task); 1569 + 1570 + write_lock_irq(&gpii->pm_lock); 1571 + gpii->pm_state = PAUSE_STATE; 1572 + write_unlock_irq(&gpii->pm_lock); 1573 + mutex_unlock(&gpii->ctrl_lock); 1574 + 1575 + return 0; 1576 + } 1577 + 1578 + /* resume dma transfer */ 1579 + static int gpi_resume(struct dma_chan *chan) 1580 + { 1581 + struct gchan *gchan = to_gchan(chan); 1582 + struct gpii *gpii = gchan->gpii; 1583 + int i, ret; 1584 + 1585 + mutex_lock(&gpii->ctrl_lock); 1586 + if (gpii->pm_state == ACTIVE_STATE) { 1587 + dev_dbg(gpii->gpi_dev->dev, "channel is already active\n"); 1588 + mutex_unlock(&gpii->ctrl_lock); 1589 + return 0; 1590 + } 1591 + 1592 + enable_irq(gpii->irq); 1593 + 1594 + /* send start command to start the channels */ 1595 + for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { 1596 + ret = gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_START); 1597 + if (ret) { 1598 + dev_err(gpii->gpi_dev->dev, "Error starting chan, ret:%d\n", ret); 1599 + mutex_unlock(&gpii->ctrl_lock); 1600 + return ret; 1601 + } 1602 + } 1603 + 1604 + write_lock_irq(&gpii->pm_lock); 1605 + gpii->pm_state = ACTIVE_STATE; 1606 + write_unlock_irq(&gpii->pm_lock); 1607 + mutex_unlock(&gpii->ctrl_lock); 1608 + 1609 + return 0; 1610 + } 1611 + 1612 + static 
void gpi_desc_free(struct virt_dma_desc *vd) 1613 + { 1614 + struct gpi_desc *gpi_desc = to_gpi_desc(vd); 1615 + 1616 + kfree(gpi_desc); 1617 + gpi_desc = NULL; 1618 + } 1619 + 1620 + static int 1621 + gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config) 1622 + { 1623 + struct gchan *gchan = to_gchan(chan); 1624 + 1625 + if (!config->peripheral_config) 1626 + return -EINVAL; 1627 + 1628 + gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT); 1629 + if (!gchan->config) 1630 + return -ENOMEM; 1631 + 1632 + memcpy(gchan->config, config->peripheral_config, config->peripheral_size); 1633 + 1634 + return 0; 1635 + } 1636 + 1637 + static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc, 1638 + struct scatterlist *sgl, enum dma_transfer_direction direction) 1639 + { 1640 + struct gpi_i2c_config *i2c = chan->config; 1641 + struct device *dev = chan->gpii->gpi_dev->dev; 1642 + unsigned int tre_idx = 0; 1643 + dma_addr_t address; 1644 + struct gpi_tre *tre; 1645 + unsigned int i; 1646 + 1647 + /* first create config tre if applicable */ 1648 + if (i2c->set_config) { 1649 + tre = &desc->tre[tre_idx]; 1650 + tre_idx++; 1651 + 1652 + tre->dword[0] = u32_encode_bits(i2c->low_count, TRE_I2C_C0_TLOW); 1653 + tre->dword[0] |= u32_encode_bits(i2c->high_count, TRE_I2C_C0_THIGH); 1654 + tre->dword[0] |= u32_encode_bits(i2c->cycle_count, TRE_I2C_C0_TCYL); 1655 + tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_TX_PACK); 1656 + tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_RX_PACK); 1657 + 1658 + tre->dword[1] = 0; 1659 + 1660 + tre->dword[2] = u32_encode_bits(i2c->clk_div, TRE_C0_CLK_DIV); 1661 + 1662 + tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE); 1663 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); 1664 + } 1665 + 1666 + /* create the GO tre for Tx */ 1667 + if (i2c->op == I2C_WRITE) { 1668 + tre = &desc->tre[tre_idx]; 1669 + tre_idx++; 1670 + 1671 + if (i2c->multi_msg) 1672 + tre->dword[0] = u32_encode_bits(I2C_READ, TRE_I2C_GO_CMD); 1673 + else 1674 + tre->dword[0] = u32_encode_bits(i2c->op, TRE_I2C_GO_CMD); 1675 + 1676 + tre->dword[0] |= u32_encode_bits(i2c->addr, TRE_I2C_GO_ADDR); 1677 + tre->dword[0] |= u32_encode_bits(i2c->stretch, TRE_I2C_GO_STRETCH); 1678 + 1679 + tre->dword[1] = 0; 1680 + tre->dword[2] = u32_encode_bits(i2c->rx_len, TRE_RX_LEN); 1681 + 1682 + tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE); 1683 + 1684 + if (i2c->multi_msg) 1685 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK); 1686 + else 1687 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); 1688 + } 1689 + 1690 + if (i2c->op == I2C_READ || i2c->multi_msg == false) { 1691 + /* create the DMA TRE */ 1692 + tre = &desc->tre[tre_idx]; 1693 + tre_idx++; 1694 + 1695 + address = sg_dma_address(sgl); 1696 + tre->dword[0] = lower_32_bits(address); 1697 + tre->dword[1] = upper_32_bits(address); 1698 + 1699 + tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN); 1700 + 1701 + tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE); 1702 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT); 1703 + }; 1704 + 1705 + for (i = 0; i < tre_idx; i++) 1706 + dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0], 1707 + desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]); 1708 + 1709 + return tre_idx; 1710 + } 1711 + 1712 + static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc, 1713 + struct scatterlist *sgl, enum dma_transfer_direction direction) 
1714 + { 1715 + struct gpi_spi_config *spi = chan->config; 1716 + struct device *dev = chan->gpii->gpi_dev->dev; 1717 + unsigned int tre_idx = 0; 1718 + dma_addr_t address; 1719 + struct gpi_tre *tre; 1720 + unsigned int i; 1721 + 1722 + /* first create config tre if applicable */ 1723 + if (direction == DMA_MEM_TO_DEV && spi->set_config) { 1724 + tre = &desc->tre[tre_idx]; 1725 + tre_idx++; 1726 + 1727 + tre->dword[0] = u32_encode_bits(spi->word_len, TRE_SPI_C0_WORD_SZ); 1728 + tre->dword[0] |= u32_encode_bits(spi->loopback_en, TRE_SPI_C0_LOOPBACK); 1729 + tre->dword[0] |= u32_encode_bits(spi->clock_pol_high, TRE_SPI_C0_CPOL); 1730 + tre->dword[0] |= u32_encode_bits(spi->data_pol_high, TRE_SPI_C0_CPHA); 1731 + tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_TX_PACK); 1732 + tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_RX_PACK); 1733 + 1734 + tre->dword[1] = 0; 1735 + 1736 + tre->dword[2] = u32_encode_bits(spi->clk_div, TRE_C0_CLK_DIV); 1737 + tre->dword[2] |= u32_encode_bits(spi->clk_src, TRE_C0_CLK_SRC); 1738 + 1739 + tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE); 1740 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); 1741 + } 1742 + 1743 + /* create the GO tre for Tx */ 1744 + if (direction == DMA_MEM_TO_DEV) { 1745 + tre = &desc->tre[tre_idx]; 1746 + tre_idx++; 1747 + 1748 + tre->dword[0] = u32_encode_bits(spi->fragmentation, TRE_SPI_GO_FRAG); 1749 + tre->dword[0] |= u32_encode_bits(spi->cs, TRE_SPI_GO_CS); 1750 + tre->dword[0] |= u32_encode_bits(spi->cmd, TRE_SPI_GO_CMD); 1751 + 1752 + tre->dword[1] = 0; 1753 + 1754 + tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN); 1755 + 1756 + tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE); 1757 + if (spi->cmd == SPI_RX) 1758 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB); 1759 + else 1760 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); 1761 + } 1762 + 1763 + /* create the dma tre */ 1764 + tre = &desc->tre[tre_idx]; 1765 + tre_idx++; 1766 + 1767 + address = sg_dma_address(sgl); 1768 + tre->dword[0] = lower_32_bits(address); 1769 + tre->dword[1] = upper_32_bits(address); 1770 + 1771 + tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN); 1772 + 1773 + tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE); 1774 + if (direction == DMA_MEM_TO_DEV) 1775 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT); 1776 + 1777 + for (i = 0; i < tre_idx; i++) 1778 + dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0], 1779 + desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]); 1780 + 1781 + return tre_idx; 1782 + } 1783 + 1784 + /* copy tre into transfer ring */ 1785 + static struct dma_async_tx_descriptor * 1786 + gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 1787 + unsigned int sg_len, enum dma_transfer_direction direction, 1788 + unsigned long flags, void *context) 1789 + { 1790 + struct gchan *gchan = to_gchan(chan); 1791 + struct gpii *gpii = gchan->gpii; 1792 + struct device *dev = gpii->gpi_dev->dev; 1793 + struct gpi_ring *ch_ring = &gchan->ch_ring; 1794 + struct gpi_desc *gpi_desc; 1795 + u32 nr, nr_tre = 0; 1796 + u8 set_config; 1797 + int i; 1798 + 1799 + gpii->ieob_set = false; 1800 + if (!is_slave_direction(direction)) { 1801 + dev_err(gpii->gpi_dev->dev, "invalid dma direction: %d\n", direction); 1802 + return NULL; 1803 + } 1804 + 1805 + if (sg_len > 1) { 1806 + dev_err(dev, "Multi sg sent, we support only one atm: %d\n", sg_len); 1807 + return NULL; 1808 + } 1809 + 1810 + nr_tre = 3; 1811 + 
set_config = *(u32 *)gchan->config; 1812 + if (!set_config) 1813 + nr_tre = 2; 1814 + if (direction == DMA_DEV_TO_MEM) /* rx */ 1815 + nr_tre = 1; 1816 + 1817 + /* calculate # of elements required & available */ 1818 + nr = gpi_ring_num_elements_avail(ch_ring); 1819 + if (nr < nr_tre) { 1820 + dev_err(dev, "not enough space in ring, avail:%u required:%u\n", nr, nr_tre); 1821 + return NULL; 1822 + } 1823 + 1824 + gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT); 1825 + if (!gpi_desc) 1826 + return NULL; 1827 + 1828 + /* create TREs for xfer */ 1829 + if (gchan->protocol == QCOM_GPI_SPI) { 1830 + i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction); 1831 + } else if (gchan->protocol == QCOM_GPI_I2C) { 1832 + i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction); 1833 + } else { 1834 + dev_err(dev, "invalid peripheral: %d\n", gchan->protocol); 1835 + kfree(gpi_desc); 1836 + return NULL; 1837 + } 1838 + 1839 + /* set up the descriptor */ 1840 + gpi_desc->gchan = gchan; 1841 + gpi_desc->len = sg_dma_len(sgl); 1842 + gpi_desc->num_tre = i; 1843 + 1844 + return vchan_tx_prep(&gchan->vc, &gpi_desc->vd, flags); 1845 + } 1846 + 1847 + /* ring the transfer ring doorbell to begin the transfer */ 1848 + static void gpi_issue_pending(struct dma_chan *chan) 1849 + { 1850 + struct gchan *gchan = to_gchan(chan); 1851 + struct gpii *gpii = gchan->gpii; 1852 + unsigned long flags, pm_lock_flags; 1853 + struct virt_dma_desc *vd = NULL; 1854 + struct gpi_desc *gpi_desc; 1855 + struct gpi_ring *ch_ring = &gchan->ch_ring; 1856 + void *tre, *wp = NULL; 1857 + int i; 1858 + 1859 + read_lock_irqsave(&gpii->pm_lock, pm_lock_flags); 1860 + 1861 + /* move all submitted descriptors to the issued list */ 1862 + spin_lock_irqsave(&gchan->vc.lock, flags); 1863 + if (vchan_issue_pending(&gchan->vc)) 1864 + vd = list_last_entry(&gchan->vc.desc_issued, 1865 + struct virt_dma_desc, node); 1866 + spin_unlock_irqrestore(&gchan->vc.lock, flags); 1867 + 1868 + /* nothing to do, list is empty */ 1869 + if (!vd) { 1870 + read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags); 1871 + return; 1872 + } 1873 + 1874 + gpi_desc = to_gpi_desc(vd); 1875 + for (i = 0; i < gpi_desc->num_tre; i++) { 1876 + tre = &gpi_desc->tre[i]; 1877 + gpi_queue_xfer(gpii, gchan, tre, &wp); 1878 + } 1879 + 1880 + gpi_desc->db = ch_ring->wp; 1881 + gpi_write_ch_db(gchan, &gchan->ch_ring, gpi_desc->db); 1882 + read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags); 1883 + } 1884 + 1885 + static int gpi_ch_init(struct gchan *gchan) 1886 + { 1887 + struct gpii *gpii = gchan->gpii; 1888 + const int ev_factor = gpii->gpi_dev->ev_factor; 1889 + u32 elements; 1890 + int i = 0, ret = 0; 1891 + 1892 + gchan->pm_state = CONFIG_STATE; 1893 + 1894 + /* check if both channels are configured before continuing */ 1895 + for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) 1896 + if (gpii->gchan[i].pm_state != CONFIG_STATE) 1897 + goto exit_gpi_init; 1898 + 1899 + /* protocol must be same for both channels */ 1900 + if (gpii->gchan[0].protocol != gpii->gchan[1].protocol) { 1901 + dev_err(gpii->gpi_dev->dev, "protocols did not match: %u != %u\n", 1902 + gpii->gchan[0].protocol, gpii->gchan[1].protocol); 1903 + ret = -EINVAL; 1904 + goto exit_gpi_init; 1905 + } 1906 + 1907 + /* allocate memory for event ring */ 1908 + elements = CHAN_TRES << ev_factor; 1909 + ret = gpi_alloc_ring(&gpii->ev_ring, elements, 1910 + sizeof(union gpi_event), gpii); 1911 + if (ret) 1912 + goto exit_gpi_init; 1913 + 1914 + /* configure interrupts */ 1915 + write_lock_irq(&gpii->pm_lock); 1916 + gpii->pm_state =
PREPARE_HARDWARE; 1917 + write_unlock_irq(&gpii->pm_lock); 1918 + ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0); 1919 + if (ret) { 1920 + dev_err(gpii->gpi_dev->dev, "error configuring interrupts, ret:%d\n", ret); 1921 + goto error_config_int; 1922 + } 1923 + 1924 + /* allocate event rings */ 1925 + ret = gpi_alloc_ev_chan(gpii); 1926 + if (ret) { 1927 + dev_err(gpii->gpi_dev->dev, "error alloc_ev_chan:%d\n", ret); 1928 + goto error_alloc_ev_ring; 1929 + } 1930 + 1931 + /* Allocate all channels */ 1932 + for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { 1933 + ret = gpi_alloc_chan(&gpii->gchan[i], true); 1934 + if (ret) { 1935 + dev_err(gpii->gpi_dev->dev, "Error allocating chan:%d\n", ret); 1936 + goto error_alloc_chan; 1937 + } 1938 + } 1939 + 1940 + /* start channels */ 1941 + for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) { 1942 + ret = gpi_start_chan(&gpii->gchan[i]); 1943 + if (ret) { 1944 + dev_err(gpii->gpi_dev->dev, "Error starting chan:%d\n", ret); 1945 + goto error_start_chan; 1946 + } 1947 + } 1948 + return ret; 1949 + 1950 + error_start_chan: 1951 + for (i = i - 1; i >= 0; i--) { 1952 + gpi_stop_chan(&gpii->gchan[i]); 1953 + gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_RESET); 1954 + } 1955 + i = MAX_CHANNELS_PER_GPII; 1956 + error_alloc_chan: 1957 + for (i = i - 1; i >= 0; i--) 1958 + gpi_reset_chan(&gpii->gchan[i], GPI_CH_CMD_DE_ALLOC); 1959 + error_alloc_ev_ring: 1960 + gpi_disable_interrupts(gpii); 1961 + error_config_int: 1962 + gpi_free_ring(&gpii->ev_ring, gpii); 1963 + 1964 + exit_gpi_init: 1965 + return ret; 1966 + } 1967 + 1968 + /* release all channel resources */ 1969 + static void gpi_free_chan_resources(struct dma_chan *chan) 1970 + { 1971 + struct gchan *gchan = to_gchan(chan); 1972 + struct gpii *gpii = gchan->gpii; 1973 + enum gpi_pm_state cur_state; 1974 + int ret, i; 1975 + 1976 + mutex_lock(&gpii->ctrl_lock); 1977 + 1978 + cur_state = gchan->pm_state; 1979 + 1980 + /* disable ch state so no more TRE processing for this channel */ 1981 + write_lock_irq(&gpii->pm_lock); 1982 + gchan->pm_state = PREPARE_TERMINATE; 1983 + write_unlock_irq(&gpii->pm_lock); 1984 + 1985 + /* attempt to do graceful hardware shutdown */ 1986 + if (cur_state == ACTIVE_STATE) { 1987 + gpi_stop_chan(gchan); 1988 + 1989 + ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET); 1990 + if (ret) 1991 + dev_err(gpii->gpi_dev->dev, "error resetting channel:%d\n", ret); 1992 + 1993 + gpi_reset_chan(gchan, GPI_CH_CMD_DE_ALLOC); 1994 + } 1995 + 1996 + /* free all allocated memory */ 1997 + gpi_free_ring(&gchan->ch_ring, gpii); 1998 + vchan_free_chan_resources(&gchan->vc); 1999 + kfree(gchan->config); 2000 + gchan->config = NULL; 2001 + write_lock_irq(&gpii->pm_lock); 2002 + gchan->pm_state = DISABLE_STATE; 2003 + write_unlock_irq(&gpii->pm_lock); 2004 + 2005 + /* if other rings are still active, exit */ 2006 + for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) 2007 + if (gpii->gchan[i].ch_ring.configured) 2008 + goto exit_free; 2009 + 2010 + /* deallocate EV Ring */ 2011 + cur_state = gpii->pm_state; 2012 + write_lock_irq(&gpii->pm_lock); 2013 + gpii->pm_state = PREPARE_TERMINATE; 2014 + write_unlock_irq(&gpii->pm_lock); 2015 + 2016 + /* wait for the event processing tasklet to finish */ 2017 + tasklet_kill(&gpii->ev_task); 2018 + 2019 + /* send command to de-allocate event ring */ 2020 + if (cur_state == ACTIVE_STATE) 2021 + gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC); 2022 + 2023 + gpi_free_ring(&gpii->ev_ring, gpii); 2024 + 2025 + /* disable interrupts */ 2026 + if (cur_state == ACTIVE_STATE) 2027 + gpi_disable_interrupts(gpii); 2028 + 2029 + /*
set final state to disable */ 2030 + write_lock_irq(&gpii->pm_lock); 2031 + gpii->pm_state = DISABLE_STATE; 2032 + write_unlock_irq(&gpii->pm_lock); 2033 + 2034 + exit_free: 2035 + mutex_unlock(&gpii->ctrl_lock); 2036 + } 2037 + 2038 + /* allocate channel resources */ 2039 + static int gpi_alloc_chan_resources(struct dma_chan *chan) 2040 + { 2041 + struct gchan *gchan = to_gchan(chan); 2042 + struct gpii *gpii = gchan->gpii; 2043 + int ret; 2044 + 2045 + mutex_lock(&gpii->ctrl_lock); 2046 + 2047 + /* allocate memory for transfer ring */ 2048 + ret = gpi_alloc_ring(&gchan->ch_ring, CHAN_TRES, 2049 + sizeof(struct gpi_tre), gpii); 2050 + if (ret) 2051 + goto xfer_alloc_err; 2052 + 2053 + ret = gpi_ch_init(gchan); 2054 + 2055 + mutex_unlock(&gpii->ctrl_lock); 2056 + 2057 + return ret; 2058 + xfer_alloc_err: 2059 + mutex_unlock(&gpii->ctrl_lock); 2060 + 2061 + return ret; 2062 + } 2063 + 2064 + static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid) 2065 + { 2066 + struct gchan *tx_chan, *rx_chan; 2067 + unsigned int gpii; 2068 + 2069 + /* check if same seid is already configured for another chid */ 2070 + for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) { 2071 + if (!((1 << gpii) & gpi_dev->gpii_mask)) 2072 + continue; 2073 + 2074 + tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN]; 2075 + rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN]; 2076 + 2077 + if (rx_chan->vc.chan.client_count && rx_chan->seid == seid) 2078 + return gpii; 2079 + if (tx_chan->vc.chan.client_count && tx_chan->seid == seid) 2080 + return gpii; 2081 + } 2082 + 2083 + /* no channels configured with same seid, return next avail gpii */ 2084 + for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) { 2085 + if (!((1 << gpii) & gpi_dev->gpii_mask)) 2086 + continue; 2087 + 2088 + tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN]; 2089 + rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN]; 2090 + 2091 + /* check if gpii is configured */ 2092 + if (tx_chan->vc.chan.client_count || 2093 + rx_chan->vc.chan.client_count) 2094 + continue; 2095 + 2096 + /* found a free gpii */ 2097 + return gpii; 2098 + } 2099 + 2100 + /* no gpii instance available to use */ 2101 + return -EIO; 2102 + } 2103 + 2104 + /* gpi_of_dma_xlate: open client requested channel */ 2105 + static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args, 2106 + struct of_dma *of_dma) 2107 + { 2108 + struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data; 2109 + u32 seid, chid; 2110 + int gpii; 2111 + struct gchan *gchan; 2112 + 2113 + if (args->args_count < 3) { 2114 + dev_err(gpi_dev->dev, "gpii requires minimum 3 args, client passed:%d args\n", 2115 + args->args_count); 2116 + return NULL; 2117 + } 2118 + 2119 + chid = args->args[0]; 2120 + if (chid >= MAX_CHANNELS_PER_GPII) { 2121 + dev_err(gpi_dev->dev, "gpii channel:%d not valid\n", chid); 2122 + return NULL; 2123 + } 2124 + 2125 + seid = args->args[1]; 2126 + 2127 + /* find next available gpii to use */ 2128 + gpii = gpi_find_avail_gpii(gpi_dev, seid); 2129 + if (gpii < 0) { 2130 + dev_err(gpi_dev->dev, "no available gpii instances\n"); 2131 + return NULL; 2132 + } 2133 + 2134 + gchan = &gpi_dev->gpiis[gpii].gchan[chid]; 2135 + if (gchan->vc.chan.client_count) { 2136 + dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n", 2137 + gpii, chid, gchan->seid); 2138 + return NULL; 2139 + } 2140 + 2141 + gchan->seid = seid; 2142 + gchan->protocol = args->args[2]; 2143 + 2144 + return dma_get_slave_channel(&gchan->vc.chan); 2145 + } 2146 + 2147 + static int gpi_probe(struct
platform_device *pdev) 2148 + { 2149 + struct gpi_dev *gpi_dev; 2150 + unsigned int i; 2151 + int ret; 2152 + 2153 + gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL); 2154 + if (!gpi_dev) 2155 + return -ENOMEM; 2156 + 2157 + gpi_dev->dev = &pdev->dev; 2158 + gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2159 + gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res); 2160 + if (IS_ERR(gpi_dev->regs)) 2161 + return PTR_ERR(gpi_dev->regs); 2162 + gpi_dev->ee_base = gpi_dev->regs; 2163 + 2164 + ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channels", 2165 + &gpi_dev->max_gpii); 2166 + if (ret) { 2167 + dev_err(gpi_dev->dev, "missing 'dma-channels' DT property\n"); 2168 + return ret; 2169 + } 2170 + 2171 + ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channel-mask", 2172 + &gpi_dev->gpii_mask); 2173 + if (ret) { 2174 + dev_err(gpi_dev->dev, "missing 'dma-channel-mask' DT property\n"); 2175 + return ret; 2176 + } 2177 + 2178 + gpi_dev->ev_factor = EV_FACTOR; 2179 + 2180 + ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64)); 2181 + if (ret) { 2182 + dev_err(gpi_dev->dev, "Error setting dma_mask to 64, ret:%d\n", ret); 2183 + return ret; 2184 + } 2185 + 2186 + gpi_dev->gpiis = devm_kcalloc(gpi_dev->dev, gpi_dev->max_gpii, 2187 + sizeof(*gpi_dev->gpiis), GFP_KERNEL); 2188 + if (!gpi_dev->gpiis) 2189 + return -ENOMEM; 2190 + 2191 + /* setup all the supported gpii */ 2192 + INIT_LIST_HEAD(&gpi_dev->dma_device.channels); 2193 + for (i = 0; i < gpi_dev->max_gpii; i++) { 2194 + struct gpii *gpii = &gpi_dev->gpiis[i]; 2195 + int chan; 2196 + 2197 + if (!((1 << i) & gpi_dev->gpii_mask)) 2198 + continue; 2199 + 2200 + /* set up ev cntxt register map */ 2201 + gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0); 2202 + gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0); 2203 + gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB; 2204 + gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i); 2205 + gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i); 2206 + 2207 + /* set up irq */ 2208 + ret = platform_get_irq(pdev, i); 2209 + if (ret < 0) { 2210 + dev_err(gpi_dev->dev, "platform_get_irq failed for %d:%d\n", i, ret); 2211 + return ret; 2212 + } 2213 + gpii->irq = ret; 2214 + 2215 + /* set up channel specific register info */ 2216 + for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) { 2217 + struct gchan *gchan = &gpii->gchan[chan]; 2218 + 2219 + /* set up ch cntxt register map */ 2220 + gchan->ch_cntxt_base_reg = gpi_dev->ee_base + 2221 + GPII_n_CH_k_CNTXT_0_OFFS(i, chan); 2222 + gchan->ch_cntxt_db_reg = gpi_dev->ee_base + 2223 + GPII_n_CH_k_DOORBELL_0_OFFS(i, chan); 2224 + gchan->ch_cmd_reg = gpi_dev->ee_base + GPII_n_CH_CMD_OFFS(i); 2225 + 2226 + /* vchan setup */ 2227 + vchan_init(&gchan->vc, &gpi_dev->dma_device); 2228 + gchan->vc.desc_free = gpi_desc_free; 2229 + gchan->chid = chan; 2230 + gchan->gpii = gpii; 2231 + gchan->dir = GPII_CHAN_DIR[chan]; 2232 + } 2233 + mutex_init(&gpii->ctrl_lock); 2234 + rwlock_init(&gpii->pm_lock); 2235 + tasklet_init(&gpii->ev_task, gpi_ev_tasklet, 2236 + (unsigned long)gpii); 2237 + init_completion(&gpii->cmd_completion); 2238 + gpii->gpii_id = i; 2239 + gpii->regs = gpi_dev->ee_base; 2240 + gpii->gpi_dev = gpi_dev; 2241 + } 2242 + 2243 + platform_set_drvdata(pdev, gpi_dev); 2244 + 2245 + /* clear and set capabilities */ 2246 + dma_cap_zero(gpi_dev->dma_device.cap_mask); 2247 + dma_cap_set(DMA_SLAVE,
gpi_dev->dma_device.cap_mask); 2248 + 2249 + /* configure dmaengine apis */ 2250 + gpi_dev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 2251 + gpi_dev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; 2252 + gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES; 2253 + gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES; 2254 + gpi_dev->dma_device.device_alloc_chan_resources = gpi_alloc_chan_resources; 2255 + gpi_dev->dma_device.device_free_chan_resources = gpi_free_chan_resources; 2256 + gpi_dev->dma_device.device_tx_status = dma_cookie_status; 2257 + gpi_dev->dma_device.device_issue_pending = gpi_issue_pending; 2258 + gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg; 2259 + gpi_dev->dma_device.device_config = gpi_peripheral_config; 2260 + gpi_dev->dma_device.device_terminate_all = gpi_terminate_all; 2261 + gpi_dev->dma_device.dev = gpi_dev->dev; 2262 + gpi_dev->dma_device.device_pause = gpi_pause; 2263 + gpi_dev->dma_device.device_resume = gpi_resume; 2264 + 2265 + /* register with dmaengine framework */ 2266 + ret = dma_async_device_register(&gpi_dev->dma_device); 2267 + if (ret) { 2268 + dev_err(gpi_dev->dev, "async_device_register failed ret:%d\n", ret); 2269 + return ret; 2270 + } 2271 + 2272 + ret = of_dma_controller_register(gpi_dev->dev->of_node, 2273 + gpi_of_dma_xlate, gpi_dev); 2274 + if (ret) { 2275 + dev_err(gpi_dev->dev, "of_dma_controller_reg failed ret:%d\n", ret); 2276 + dma_async_device_unregister(&gpi_dev->dma_device); 2277 + return ret; 2278 + } 2279 + return 0; 2280 + } 2281 + 2282 + static const struct of_device_id gpi_of_match[] = { 2283 + { .compatible = "qcom,sdm845-gpi-dma" }, 2284 + { }, 2285 + }; 2286 + MODULE_DEVICE_TABLE(of, gpi_of_match); 2287 + 2288 + static struct platform_driver gpi_driver = { 2289 + .probe = gpi_probe, 2290 + .driver = { 2291 + .name = KBUILD_MODNAME, 2292 + .of_match_table = gpi_of_match, 2293 + }, 2294 + }; 2295 + 2296 + static int __init gpi_init(void) 2297 + { 2298 + return platform_driver_register(&gpi_driver); 2299 + } 2300 + subsys_initcall(gpi_init); 2301 + 2302 + MODULE_DESCRIPTION("QCOM GPI DMA engine driver"); 2303 + MODULE_LICENSE("GPL v2");
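Putting the pieces above together from the client side: the peripheral-specific blob travels through dma_slave_config::peripheral_config (consumed by gpi_peripheral_config()), the transfer is built by gpi_prep_slave_sg(), and gpi_issue_pending() rings the channel doorbell. A minimal sketch of an I2C write under those assumptions; the helper name, timing values and buffer handling are illustrative, not part of this patch:

	#include <linux/dma/qcom-gpi-dma.h>
	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	/* illustrative helper, not part of this patch */
	static int example_i2c_dma_write(struct dma_chan *tx_chan, u8 addr,
					 dma_addr_t buf, size_t len)
	{
		struct gpi_i2c_config i2c = {
			.set_config = 1,	/* emit a CONFIG0 TRE first */
			.op = I2C_WRITE,
			.addr = addr,
			.low_count = 8,		/* placeholder timing values */
			.high_count = 8,
			.cycle_count = 16,
			.clk_div = 1,
		};
		struct dma_slave_config cfg = {
			/* copied verbatim by gpi_peripheral_config() */
			.peripheral_config = &i2c,
			.peripheral_size = sizeof(i2c),
		};
		struct dma_async_tx_descriptor *desc;
		struct scatterlist sg;
		int ret;

		ret = dmaengine_slave_config(tx_chan, &cfg);
		if (ret)
			return ret;

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = buf;
		sg_dma_len(&sg) = len;

		/* gpi_prep_slave_sg() accepts a single SG entry only */
		desc = dmaengine_prep_slave_sg(tx_chan, &sg, 1, DMA_MEM_TO_DEV, 0);
		if (!desc)
			return -EINVAL;

		dmaengine_submit(desc);
		dma_async_issue_pending(tx_chan);	/* rings the doorbell */
		return 0;
	}

Note that gpi_prep_slave_sg() currently rejects sg_len > 1, so a client has to map each message as one contiguous DMA buffer.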
+83
include/linux/dma/qcom-gpi-dma.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2020, Linaro Limited 4 + */ 5 + 6 + #ifndef QCOM_GPI_DMA_H 7 + #define QCOM_GPI_DMA_H 8 + 9 + /** 10 + * enum spi_transfer_cmd - spi transfer commands 11 + */ 12 + enum spi_transfer_cmd { 13 + SPI_TX = 1, 14 + SPI_RX, 15 + SPI_DUPLEX, 16 + }; 17 + 18 + /** 19 + * struct gpi_spi_config - spi config for peripheral 20 + * 21 + * @loopback_en: spi loopback enable when set 22 + * @clock_pol_high: clock polarity 23 + * @data_pol_high: data polarity 24 + * @pack_en: process tx/rx buffers as packed 25 + * @word_len: spi word length 26 + * @clk_div: source clock divider 27 + * @clk_src: serial clock source 28 + * @cmd: spi cmd 29 + * @fragmentation: keep CS asserted at end of sequence 30 + * @cs: chip select toggle 31 + * @set_config: set peripheral config 32 + * @rx_len: receive length for buffer 33 + */ 34 + struct gpi_spi_config { 35 + u8 set_config; 36 + u8 loopback_en; 37 + u8 clock_pol_high; 38 + u8 data_pol_high; 39 + u8 pack_en; 40 + u8 word_len; 41 + u8 fragmentation; 42 + u8 cs; 43 + u32 clk_div; 44 + u32 clk_src; 45 + enum spi_transfer_cmd cmd; 46 + u32 rx_len; 47 + }; 48 + 49 + enum i2c_op { 50 + I2C_WRITE = 1, 51 + I2C_READ, 52 + }; 53 + 54 + /** 55 + * struct gpi_i2c_config - i2c config for peripheral 56 + * 57 + * @pack_enable: process tx/rx buffers as packed 58 + * @cycle_count: clock cycles to be sent 59 + * @high_count: high period of clock 60 + * @low_count: low period of clock 61 + * @clk_div: source clock divider 62 + * @addr: i2c bus address 63 + * @stretch: stretch the clock at eot 64 + * @set_config: set peripheral config 65 + * @rx_len: receive length for buffer 66 + * @op: i2c cmd 67 + * @multi_msg: is part of multi i2c r-w msgs 68 + */ 69 + struct gpi_i2c_config { 70 + u8 set_config; 71 + u8 pack_enable; 72 + u8 cycle_count; 73 + u8 high_count; 74 + u8 low_count; 75 + u8 addr; 76 + u8 stretch; 77 + u16 clk_div; 78 + u32 rx_len; 79 + enum i2c_op op; 80 + bool multi_msg; 81 + }; 82 + 83 + #endif /* QCOM_GPI_DMA_H */
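For reference, gpi_of_dma_xlate() in gpi.c expects three cells per dma spec: the channel id within the GPII pair, the serial-engine id matched by gpi_find_avail_gpii(), and the protocol constant from dt-bindings/dma/qcom-gpi.h. A sketch of an SPI controller driver requesting its channel pair and applying a gpi_spi_config; the names and values below are illustrative assumptions, not part of this patch:

	#include <dt-bindings/dma/qcom-gpi.h>
	#include <linux/dma/qcom-gpi-dma.h>
	#include <linux/dmaengine.h>

	/*
	 * illustrative helper, not part of this patch; assumes the consumer
	 * node carries something like
	 *   dmas = <&gpi_dma 0 seid QCOM_GPI_SPI>, <&gpi_dma 1 seid QCOM_GPI_SPI>;
	 *   dma-names = "tx", "rx";
	 */
	static int example_spi_request_dma(struct device *dev,
					   struct dma_chan **tx,
					   struct dma_chan **rx)
	{
		struct gpi_spi_config spi = {
			.set_config = 1,
			.cmd = SPI_TX,
			.word_len = 8,
			.clk_div = 4,		/* placeholder clocking */
			.fragmentation = 1,	/* keep CS asserted across the sequence */
		};
		struct dma_slave_config cfg = {
			.peripheral_config = &spi,
			.peripheral_size = sizeof(spi),
		};
		int ret;

		*tx = dma_request_chan(dev, "tx");	/* resolved via gpi_of_dma_xlate() */
		if (IS_ERR(*tx))
			return PTR_ERR(*tx);

		*rx = dma_request_chan(dev, "rx");
		if (IS_ERR(*rx)) {
			ret = PTR_ERR(*rx);
			dma_release_channel(*tx);
			return ret;
		}

		return dmaengine_slave_config(*tx, &cfg);
	}

Requesting both channels up front matches gpi_ch_init() above, which only brings the hardware up once both channels of a GPII are configured with the same protocol.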