Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Add a few updates to the STM32 SPI driver

Merge series from Clément Le Goffic <clement.legoffic@foss.st.com>:

This series aims to improve the STM32 SPI driver in different areas.
It adds SPI_READY mode, fixes an issue raised by a kernel bot,
adds the ability to use DMA-MDMA chaining for RX, and deprecates an ST
vendor bindings property.

+325 -34
+1
Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
··· 115 115 maxItems: 4 116 116 117 117 st,spi-midi-ns: 118 + deprecated: true 118 119 description: | 119 120 Only for STM32H7, (Master Inter-Data Idleness) minimum time 120 121 delay in nanoseconds inserted between two consecutive data frames.
+46 -2
Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
··· 18 18 19 19 allOf: 20 20 - $ref: spi-controller.yaml# 21 + - if: 22 + properties: 23 + compatible: 24 + contains: 25 + const: st,stm32f4-spi 26 + 27 + then: 28 + properties: 29 + st,spi-midi-ns: false 30 + sram: false 31 + dmas: 32 + maxItems: 2 33 + dma-names: 34 + items: 35 + - const: rx 36 + - const: tx 37 + 38 + - if: 39 + properties: 40 + compatible: 41 + contains: 42 + const: st,stm32mp25-spi 43 + 44 + then: 45 + properties: 46 + sram: false 47 + dmas: 48 + maxItems: 2 49 + dma-names: 50 + items: 51 + - const: rx 52 + - const: tx 21 53 22 54 properties: 23 55 compatible: ··· 73 41 74 42 dmas: 75 43 description: | 76 - DMA specifiers for tx and rx dma. DMA fifo mode must be used. See 77 - the STM32 DMA controllers bindings Documentation/devicetree/bindings/dma/stm32/*.yaml. 44 + DMA specifiers for tx and rx channels. DMA fifo mode must be used. See 45 + the STM32 DMA bindings Documentation/devicetree/bindings/dma/stm32/st,*dma.yaml 46 + minItems: 2 78 47 items: 79 48 - description: rx DMA channel 80 49 - description: tx DMA channel 50 + - description: rxm2m MDMA channel 81 51 82 52 dma-names: 53 + minItems: 2 83 54 items: 84 55 - const: rx 85 56 - const: tx 57 + - const: rxm2m 58 + 59 + sram: 60 + $ref: /schemas/types.yaml#/definitions/phandle 61 + description: | 62 + Phandles to a reserved SRAM region which is used as temporary 63 + storage memory between DMA and MDMA engines. 64 + The region should be defined as child node of the AHB SRAM node 65 + as per the generic bindings in Documentation/devicetree/bindings/sram/sram.yaml 86 66 87 67 access-controllers: 88 68 minItems: 1
+278 -32
drivers/spi/spi-stm32.c
··· 9 9 #include <linux/debugfs.h> 10 10 #include <linux/clk.h> 11 11 #include <linux/delay.h> 12 + #include <linux/dma-mapping.h> 12 13 #include <linux/dmaengine.h> 14 + #include <linux/genalloc.h> 13 15 #include <linux/interrupt.h> 14 16 #include <linux/iopoll.h> 15 17 #include <linux/module.h> ··· 156 154 /* STM32H7_SPI_I2SCFGR bit fields */ 157 155 #define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0) 158 156 157 + /* STM32MP25_SPICFG2 bit fields */ 158 + #define STM32MP25_SPI_CFG2_RDIOM BIT(13) 159 + 159 160 /* STM32MP25 SPI registers bit fields */ 160 161 #define STM32MP25_SPI_HWCFGR1 0x3F0 161 162 ··· 227 222 * @rx: SPI RX data register 228 223 * @tx: SPI TX data register 229 224 * @fullcfg: SPI full or limited feature set register 225 + * @rdy_en: SPI ready feature register 230 226 */ 231 227 struct stm32_spi_regspec { 232 228 const struct stm32_spi_reg en; ··· 241 235 const struct stm32_spi_reg rx; 242 236 const struct stm32_spi_reg tx; 243 237 const struct stm32_spi_reg fullcfg; 238 + const struct stm32_spi_reg rdy_en; 244 239 }; 245 240 246 241 struct stm32_spi; ··· 283 276 int (*config)(struct stm32_spi *spi); 284 277 void (*set_bpw)(struct stm32_spi *spi); 285 278 int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type); 286 - void (*set_data_idleness)(struct stm32_spi *spi, u32 length); 279 + void (*set_data_idleness)(struct stm32_spi *spi, struct spi_transfer *xfer); 287 280 int (*set_number_of_data)(struct stm32_spi *spi, u32 length); 288 281 void (*write_tx)(struct stm32_spi *spi); 289 282 void (*read_rx)(struct stm32_spi *spi); ··· 330 323 * @dma_rx: dma channel for RX transfer 331 324 * @phys_addr: SPI registers physical base address 332 325 * @device_mode: the controller is configured as SPI device 326 + * @sram_pool: SRAM pool for DMA transfers 327 + * @sram_rx_buf_size: size of SRAM buffer for RX transfer 328 + * @sram_rx_buf: SRAM buffer for RX transfer 329 + * @sram_dma_rx_buf: SRAM buffer physical address for RX transfer 330 + * @mdma_rx: MDMA 
channel for RX transfer 333 331 */ 334 332 struct stm32_spi { 335 333 struct device *dev; ··· 369 357 dma_addr_t phys_addr; 370 358 371 359 bool device_mode; 360 + 361 + struct gen_pool *sram_pool; 362 + size_t sram_rx_buf_size; 363 + void *sram_rx_buf; 364 + dma_addr_t sram_dma_rx_buf; 365 + struct dma_chan *mdma_rx; 372 366 }; 373 367 374 368 static const struct stm32_spi_regspec stm32fx_spi_regspec = { ··· 433 415 .tx = { STM32H7_SPI_TXDR }, 434 416 435 417 .fullcfg = { STM32MP25_SPI_HWCFGR1, STM32MP25_SPI_HWCFGR1_FULLCFG }, 418 + 419 + .rdy_en = { STM32H7_SPI_CFG2, STM32MP25_SPI_CFG2_RDIOM }, 436 420 }; 437 421 438 422 static inline void stm32_spi_set_bits(struct stm32_spi *spi, ··· 898 878 899 879 if (spi->cur_usedma && spi->dma_tx) 900 880 dmaengine_terminate_async(spi->dma_tx); 901 - if (spi->cur_usedma && spi->dma_rx) 881 + if (spi->cur_usedma && spi->dma_rx) { 902 882 dmaengine_terminate_async(spi->dma_rx); 883 + if (spi->mdma_rx) 884 + dmaengine_terminate_async(spi->mdma_rx); 885 + } 903 886 904 887 stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); 905 888 ··· 1114 1091 } 1115 1092 1116 1093 if (sr & STM32H7_SPI_SR_EOT) { 1094 + dev_dbg(spi->dev, "End of transfer\n"); 1117 1095 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) 1118 1096 stm32h7_spi_read_rxfifo(spi); 1119 1097 if (!spi->cur_usedma || 1120 - (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)) 1098 + (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) || 1099 + (spi->mdma_rx && (spi->cur_comm == SPI_SIMPLEX_RX || 1100 + spi->cur_comm == SPI_FULL_DUPLEX))) 1121 1101 end = true; 1122 1102 } 1123 1103 ··· 1137 1111 spin_unlock_irqrestore(&spi->lock, flags); 1138 1112 1139 1113 if (end) { 1114 + if (spi->cur_usedma && spi->mdma_rx) { 1115 + dmaengine_pause(spi->dma_rx); 1116 + /* Wait for callback */ 1117 + return IRQ_HANDLED; 1118 + } 1140 1119 stm32h7_spi_disable(spi); 1141 1120 spi_finalize_current_transfer(ctrl); 1142 1121 } ··· 1203 1172 
else 1204 1173 clrb |= spi->cfg->regs->cs_high.mask; 1205 1174 1206 - dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n", 1175 + if (spi_dev->mode & SPI_READY) 1176 + setb |= spi->cfg->regs->rdy_en.mask; 1177 + else 1178 + clrb |= spi->cfg->regs->rdy_en.mask; 1179 + 1180 + dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d rdy=%d\n", 1207 1181 !!(spi_dev->mode & SPI_CPOL), 1208 1182 !!(spi_dev->mode & SPI_CPHA), 1209 1183 !!(spi_dev->mode & SPI_LSB_FIRST), 1210 - !!(spi_dev->mode & SPI_CS_HIGH)); 1184 + !!(spi_dev->mode & SPI_CS_HIGH), 1185 + !!(spi_dev->mode & SPI_READY)); 1211 1186 1212 1187 spin_lock_irqsave(&spi->lock, flags); 1213 1188 1214 - /* CPOL, CPHA and LSB FIRST bits have common register */ 1189 + /* CPOL, CPHA, LSB FIRST, CS_HIGH and RDY_EN bits have common register */ 1215 1190 if (clrb || setb) 1216 1191 writel_relaxed( 1217 1192 (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) & ··· 1447 1410 /* Enable the interrupts */ 1448 1411 if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) 1449 1412 ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE; 1413 + if (spi->mdma_rx && (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_FULL_DUPLEX)) 1414 + ier |= STM32H7_SPI_IER_EOTIE; 1450 1415 1451 1416 stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier); 1452 1417 ··· 1456 1417 1457 1418 if (STM32_SPI_HOST_MODE(spi)) 1458 1419 stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); 1420 + } 1421 + 1422 + /** 1423 + * stm32_spi_prepare_rx_dma_mdma_chaining - Prepare RX DMA and MDMA chaining 1424 + * @spi: pointer to the spi controller data structure 1425 + * @xfer: pointer to the spi transfer 1426 + * @rx_dma_conf: pointer to the DMA configuration for RX channel 1427 + * @rx_dma_desc: pointer to the RX DMA descriptor 1428 + * @rx_mdma_desc: pointer to the RX MDMA descriptor 1429 + * 1430 + * It must return 0 if the chaining is possible or an error code if not. 
1431 + */ 1432 + static int stm32_spi_prepare_rx_dma_mdma_chaining(struct stm32_spi *spi, 1433 + struct spi_transfer *xfer, 1434 + struct dma_slave_config *rx_dma_conf, 1435 + struct dma_async_tx_descriptor **rx_dma_desc, 1436 + struct dma_async_tx_descriptor **rx_mdma_desc) 1437 + { 1438 + struct dma_slave_config rx_mdma_conf = {0}; 1439 + u32 sram_period, nents = 0, spi_s_len; 1440 + struct sg_table dma_sgt, mdma_sgt; 1441 + struct scatterlist *spi_s, *s; 1442 + dma_addr_t dma_buf; 1443 + int i, ret; 1444 + 1445 + sram_period = spi->sram_rx_buf_size / 2; 1446 + 1447 + /* Configure MDMA RX channel */ 1448 + rx_mdma_conf.direction = rx_dma_conf->direction; 1449 + rx_mdma_conf.src_addr = spi->sram_dma_rx_buf; 1450 + rx_mdma_conf.peripheral_config = rx_dma_conf->peripheral_config; 1451 + rx_mdma_conf.peripheral_size = rx_dma_conf->peripheral_size; 1452 + dmaengine_slave_config(spi->mdma_rx, &rx_mdma_conf); 1453 + 1454 + /* Count the number of entries needed */ 1455 + for_each_sg(xfer->rx_sg.sgl, spi_s, xfer->rx_sg.nents, i) 1456 + if (sg_dma_len(spi_s) > sram_period) 1457 + nents += DIV_ROUND_UP(sg_dma_len(spi_s), sram_period); 1458 + else 1459 + nents++; 1460 + 1461 + /* Prepare DMA slave_sg DBM transfer DEV_TO_MEM (RX>MEM=SRAM) */ 1462 + ret = sg_alloc_table(&dma_sgt, nents, GFP_ATOMIC); 1463 + if (ret) 1464 + return ret; 1465 + 1466 + spi_s = xfer->rx_sg.sgl; 1467 + spi_s_len = sg_dma_len(spi_s); 1468 + dma_buf = spi->sram_dma_rx_buf; 1469 + for_each_sg(dma_sgt.sgl, s, dma_sgt.nents, i) { 1470 + size_t bytes = min_t(size_t, spi_s_len, sram_period); 1471 + 1472 + sg_dma_len(s) = bytes; 1473 + sg_dma_address(s) = dma_buf; 1474 + spi_s_len -= bytes; 1475 + 1476 + if (!spi_s_len && sg_next(spi_s)) { 1477 + spi_s = sg_next(spi_s); 1478 + spi_s_len = sg_dma_len(spi_s); 1479 + dma_buf = spi->sram_dma_rx_buf; 1480 + } else { /* DMA configured in DBM: it will swap between the SRAM periods */ 1481 + if (i & 1) 1482 + dma_buf += sram_period; 1483 + else 1484 + dma_buf = 
spi->sram_dma_rx_buf; 1485 + } 1486 + } 1487 + 1488 + *rx_dma_desc = dmaengine_prep_slave_sg(spi->dma_rx, dma_sgt.sgl, 1489 + dma_sgt.nents, rx_dma_conf->direction, 1490 + DMA_PREP_INTERRUPT); 1491 + sg_free_table(&dma_sgt); 1492 + 1493 + if (!rx_dma_desc) 1494 + return -EINVAL; 1495 + 1496 + /* Prepare MDMA slave_sg transfer MEM_TO_MEM (SRAM>DDR) */ 1497 + ret = sg_alloc_table(&mdma_sgt, nents, GFP_ATOMIC); 1498 + if (ret) { 1499 + rx_dma_desc = NULL; 1500 + return ret; 1501 + } 1502 + 1503 + spi_s = xfer->rx_sg.sgl; 1504 + spi_s_len = sg_dma_len(spi_s); 1505 + dma_buf = sg_dma_address(spi_s); 1506 + for_each_sg(mdma_sgt.sgl, s, mdma_sgt.nents, i) { 1507 + size_t bytes = min_t(size_t, spi_s_len, sram_period); 1508 + 1509 + sg_dma_len(s) = bytes; 1510 + sg_dma_address(s) = dma_buf; 1511 + spi_s_len -= bytes; 1512 + 1513 + if (!spi_s_len && sg_next(spi_s)) { 1514 + spi_s = sg_next(spi_s); 1515 + spi_s_len = sg_dma_len(spi_s); 1516 + dma_buf = sg_dma_address(spi_s); 1517 + } else { 1518 + dma_buf += bytes; 1519 + } 1520 + } 1521 + 1522 + *rx_mdma_desc = dmaengine_prep_slave_sg(spi->mdma_rx, mdma_sgt.sgl, 1523 + mdma_sgt.nents, rx_mdma_conf.direction, 1524 + DMA_PREP_INTERRUPT); 1525 + sg_free_table(&mdma_sgt); 1526 + 1527 + if (!rx_mdma_desc) { 1528 + rx_dma_desc = NULL; 1529 + return -EINVAL; 1530 + } 1531 + 1532 + return 0; 1459 1533 } 1460 1534 1461 1535 /** ··· 1582 1430 static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, 1583 1431 struct spi_transfer *xfer) 1584 1432 { 1433 + struct dma_async_tx_descriptor *rx_mdma_desc = NULL, *rx_dma_desc = NULL; 1434 + struct dma_async_tx_descriptor *tx_dma_desc = NULL; 1585 1435 struct dma_slave_config tx_dma_conf, rx_dma_conf; 1586 - struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc; 1587 1436 unsigned long flags; 1437 + int ret = 0; 1588 1438 1589 1439 spin_lock_irqsave(&spi->lock, flags); 1590 1440 1591 - rx_dma_desc = NULL; 1592 1441 if (spi->rx_buf && spi->dma_rx) { 1593 1442 stm32_spi_dma_config(spi, 
spi->dma_rx, &rx_dma_conf, DMA_DEV_TO_MEM); 1594 - dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); 1443 + if (spi->mdma_rx) { 1444 + rx_dma_conf.peripheral_size = 1; 1445 + dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); 1595 1446 1596 - /* Enable Rx DMA request */ 1597 - stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg, 1598 - spi->cfg->regs->dma_rx_en.mask); 1599 - 1600 - rx_dma_desc = dmaengine_prep_slave_sg( 1601 - spi->dma_rx, xfer->rx_sg.sgl, 1602 - xfer->rx_sg.nents, 1603 - rx_dma_conf.direction, 1604 - DMA_PREP_INTERRUPT); 1447 + ret = stm32_spi_prepare_rx_dma_mdma_chaining(spi, xfer, &rx_dma_conf, 1448 + &rx_dma_desc, &rx_mdma_desc); 1449 + if (ret) { /* RX DMA MDMA chaining not possible, fallback to DMA only */ 1450 + rx_dma_conf.peripheral_config = 0; 1451 + rx_dma_desc = NULL; 1452 + } 1453 + } 1454 + if (!rx_dma_desc) { 1455 + dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); 1456 + rx_dma_desc = dmaengine_prep_slave_sg(spi->dma_rx, xfer->rx_sg.sgl, 1457 + xfer->rx_sg.nents, 1458 + rx_dma_conf.direction, 1459 + DMA_PREP_INTERRUPT); 1460 + } 1605 1461 } 1606 1462 1607 - tx_dma_desc = NULL; 1608 1463 if (spi->tx_buf && spi->dma_tx) { 1609 1464 stm32_spi_dma_config(spi, spi->dma_tx, &tx_dma_conf, DMA_MEM_TO_DEV); 1610 1465 dmaengine_slave_config(spi->dma_tx, &tx_dma_conf); 1611 - 1612 - tx_dma_desc = dmaengine_prep_slave_sg( 1613 - spi->dma_tx, xfer->tx_sg.sgl, 1614 - xfer->tx_sg.nents, 1615 - tx_dma_conf.direction, 1616 - DMA_PREP_INTERRUPT); 1466 + tx_dma_desc = dmaengine_prep_slave_sg(spi->dma_tx, xfer->tx_sg.sgl, 1467 + xfer->tx_sg.nents, 1468 + tx_dma_conf.direction, 1469 + DMA_PREP_INTERRUPT); 1617 1470 } 1618 1471 1619 1472 if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) || ··· 1629 1472 goto dma_desc_error; 1630 1473 1631 1474 if (rx_dma_desc) { 1632 - rx_dma_desc->callback = spi->cfg->dma_rx_cb; 1633 - rx_dma_desc->callback_param = spi; 1475 + if (rx_mdma_desc) { 1476 + rx_mdma_desc->callback = spi->cfg->dma_rx_cb; 1477 + 
rx_mdma_desc->callback_param = spi; 1478 + } else { 1479 + rx_dma_desc->callback = spi->cfg->dma_rx_cb; 1480 + rx_dma_desc->callback_param = spi; 1481 + } 1634 1482 1483 + /* Enable Rx DMA request */ 1484 + stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg, 1485 + spi->cfg->regs->dma_rx_en.mask); 1486 + if (rx_mdma_desc) { 1487 + if (dma_submit_error(dmaengine_submit(rx_mdma_desc))) { 1488 + dev_err(spi->dev, "Rx MDMA submit failed\n"); 1489 + goto dma_desc_error; 1490 + } 1491 + /* Enable Rx MDMA channel */ 1492 + dma_async_issue_pending(spi->mdma_rx); 1493 + } 1635 1494 if (dma_submit_error(dmaengine_submit(rx_dma_desc))) { 1636 1495 dev_err(spi->dev, "Rx DMA submit failed\n"); 1637 1496 goto dma_desc_error; ··· 1682 1509 return 1; 1683 1510 1684 1511 dma_submit_error: 1512 + if (spi->mdma_rx) 1513 + dmaengine_terminate_sync(spi->mdma_rx); 1685 1514 if (spi->dma_rx) 1686 1515 dmaengine_terminate_sync(spi->dma_rx); 1687 1516 ··· 1694 1519 spin_unlock_irqrestore(&spi->lock, flags); 1695 1520 1696 1521 dev_info(spi->dev, "DMA issue: fall back to irq transfer\n"); 1522 + 1523 + if (spi->sram_rx_buf) 1524 + memset(spi->sram_rx_buf, 0, spi->sram_rx_buf_size); 1697 1525 1698 1526 spi->cur_usedma = false; 1699 1527 return spi->cfg->transfer_one_irq(spi); ··· 1880 1702 * stm32h7_spi_data_idleness - configure minimum time delay inserted between two 1881 1703 * consecutive data frames in host mode 1882 1704 * @spi: pointer to the spi controller data structure 1883 - * @len: transfer len 1705 + * @xfer: pointer to spi transfer 1884 1706 */ 1885 - static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len) 1707 + static void stm32h7_spi_data_idleness(struct stm32_spi *spi, struct spi_transfer *xfer) 1886 1708 { 1887 1709 u32 cfg2_clrb = 0, cfg2_setb = 0; 1710 + u32 len = xfer->len; 1711 + u32 spi_delay_ns; 1712 + 1713 + spi_delay_ns = spi_delay_to_ns(&xfer->word_delay, xfer); 1714 + 1715 + if (spi->cur_midi != 0) { 1716 + dev_warn(spi->dev, "st,spi-midi-ns DT 
property is deprecated\n"); 1717 + if (spi_delay_ns) { 1718 + dev_warn(spi->dev, "Overriding st,spi-midi-ns with word_delay_ns %d\n", 1719 + spi_delay_ns); 1720 + spi->cur_midi = spi_delay_ns; 1721 + } 1722 + } else { 1723 + spi->cur_midi = spi_delay_ns; 1724 + } 1888 1725 1889 1726 cfg2_clrb |= STM32H7_SPI_CFG2_MIDI; 1890 1727 if ((len > 1) && (spi->cur_midi > 0)) { ··· 1961 1768 spi->cur_bpw = transfer->bits_per_word; 1962 1769 spi->cfg->set_bpw(spi); 1963 1770 1771 + if (spi_dev->mode & SPI_READY && spi->cur_bpw < 8) { 1772 + writel_relaxed(readl_relaxed(spi->base + spi->cfg->regs->rdy_en.reg) & 1773 + ~spi->cfg->regs->rdy_en.mask, 1774 + spi->base + spi->cfg->regs->rdy_en.reg); 1775 + dev_dbg(spi->dev, "RDY logic disabled as bits per word < 8\n"); 1776 + } 1777 + 1964 1778 /* Update spi->cur_speed with real clock speed */ 1965 1779 if (STM32_SPI_HOST_MODE(spi)) { 1966 1780 mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, ··· 1990 1790 spi->cur_comm = comm_type; 1991 1791 1992 1792 if (STM32_SPI_HOST_MODE(spi) && spi->cfg->set_data_idleness) 1993 - spi->cfg->set_data_idleness(spi, transfer->len); 1793 + spi->cfg->set_data_idleness(spi, transfer); 1994 1794 1995 1795 if (spi->cur_bpw <= 8) 1996 1796 nb_words = transfer->len; ··· 2070 1870 struct stm32_spi *spi = spi_controller_get_devdata(ctrl); 2071 1871 2072 1872 spi->cfg->disable(spi); 1873 + 1874 + if (spi->sram_rx_buf) 1875 + memset(spi->sram_rx_buf, 0, spi->sram_rx_buf_size); 2073 1876 2074 1877 return 0; 2075 1878 } ··· 2272 2069 struct resource *res; 2273 2070 struct reset_control *rst; 2274 2071 struct device_node *np = pdev->dev.of_node; 2072 + const struct stm32_spi_cfg *cfg; 2275 2073 bool device_mode; 2276 2074 int ret; 2277 - const struct stm32_spi_cfg *cfg = of_device_get_match_data(&pdev->dev); 2075 + 2076 + cfg = of_device_get_match_data(&pdev->dev); 2077 + if (!cfg) { 2078 + dev_err(&pdev->dev, "Failed to get match data for platform\n"); 2079 + return -ENODEV; 2080 + } 2278 2081 2279 2082 
device_mode = of_property_read_bool(np, "spi-slave"); 2280 2083 if (!cfg->has_device_mode && device_mode) { ··· 2388 2179 ctrl->auto_runtime_pm = true; 2389 2180 ctrl->bus_num = pdev->id; 2390 2181 ctrl->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | 2391 - SPI_3WIRE; 2182 + SPI_3WIRE | SPI_READY; 2392 2183 ctrl->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); 2393 2184 ctrl->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; 2394 2185 ctrl->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; ··· 2428 2219 if (spi->dma_tx || spi->dma_rx) 2429 2220 ctrl->can_dma = stm32_spi_can_dma; 2430 2221 2222 + spi->sram_pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0); 2223 + if (spi->sram_pool) { 2224 + spi->sram_rx_buf_size = gen_pool_size(spi->sram_pool); 2225 + dev_info(&pdev->dev, "SRAM pool: %zu KiB for RX DMA/MDMA chaining\n", 2226 + spi->sram_rx_buf_size / 1024); 2227 + spi->sram_rx_buf = gen_pool_dma_zalloc(spi->sram_pool, spi->sram_rx_buf_size, 2228 + &spi->sram_dma_rx_buf); 2229 + if (!spi->sram_rx_buf) { 2230 + dev_err(&pdev->dev, "failed to allocate SRAM buffer\n"); 2231 + } else { 2232 + spi->mdma_rx = dma_request_chan(spi->dev, "rxm2m"); 2233 + if (IS_ERR(spi->mdma_rx)) { 2234 + ret = PTR_ERR(spi->mdma_rx); 2235 + spi->mdma_rx = NULL; 2236 + if (ret == -EPROBE_DEFER) { 2237 + goto err_pool_free; 2238 + } else { 2239 + gen_pool_free(spi->sram_pool, 2240 + (unsigned long)spi->sram_rx_buf, 2241 + spi->sram_rx_buf_size); 2242 + dev_warn(&pdev->dev, 2243 + "failed to request rx mdma channel, DMA only\n"); 2244 + } 2245 + } 2246 + } 2247 + } 2248 + 2431 2249 pm_runtime_set_autosuspend_delay(&pdev->dev, 2432 2250 STM32_SPI_AUTOSUSPEND_DELAY); 2433 2251 pm_runtime_use_autosuspend(&pdev->dev); ··· 2482 2246 pm_runtime_put_noidle(&pdev->dev); 2483 2247 pm_runtime_set_suspended(&pdev->dev); 2484 2248 pm_runtime_dont_use_autosuspend(&pdev->dev); 2249 + 2250 + if (spi->mdma_rx) 2251 + dma_release_channel(spi->mdma_rx); 2252 + 
err_pool_free: 2253 + gen_pool_free(spi->sram_pool, (unsigned long)spi->sram_rx_buf, spi->sram_rx_buf_size); 2485 2254 err_dma_release: 2486 2255 if (spi->dma_tx) 2487 2256 dma_release_channel(spi->dma_tx); ··· 2517 2276 dma_release_channel(ctrl->dma_tx); 2518 2277 if (ctrl->dma_rx) 2519 2278 dma_release_channel(ctrl->dma_rx); 2279 + if (spi->mdma_rx) 2280 + dma_release_channel(spi->mdma_rx); 2281 + if (spi->sram_rx_buf) 2282 + gen_pool_free(spi->sram_pool, (unsigned long)spi->sram_rx_buf, 2283 + spi->sram_rx_buf_size); 2520 2284 2521 2285 clk_disable_unprepare(spi->clk); 2522 2286