Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi: Rework DMA mapped flag

Merge series from Andy Shevchenko <andriy.shevchenko@linux.intel.com>:

The first part of the series (patches 1 to 7) introduces a new helper,
followed by conversion of its users.

This consolidates duplicated code and also keeps patch 8 (the last one)
localised to the SPI core part.

The last patch is the main rework to get rid of a recently introduced
hack with a dummy SG list and move to the transfer-based DMA mapped
flag.

That said, patches 1 to 7 may be applied right away, since they
have no functional change intended, while the last one needs more
testing and review.

+59 -69
+8
drivers/spi/internals.h
··· 40 40 } 41 41 #endif /* CONFIG_HAS_DMA */ 42 42 43 + static inline bool spi_xfer_is_dma_mapped(struct spi_controller *ctlr, 44 + struct spi_device *spi, 45 + struct spi_transfer *xfer) 46 + { 47 + return ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer) && 48 + (xfer->tx_sg_mapped || xfer->rx_sg_mapped); 49 + } 50 + 43 51 #endif /* __LINUX_SPI_INTERNALS_H */
+2 -2
drivers/spi/spi-dw-core.c
··· 19 19 #include <linux/string.h> 20 20 #include <linux/of.h> 21 21 22 + #include "internals.h" 22 23 #include "spi-dw.h" 23 24 24 25 #ifdef CONFIG_DEBUG_FS ··· 439 438 transfer->effective_speed_hz = dws->current_freq; 440 439 441 440 /* Check if current transfer is a DMA transaction */ 442 - if (host->can_dma && host->can_dma(host, spi, transfer)) 443 - dws->dma_mapped = host->cur_msg_mapped; 441 + dws->dma_mapped = spi_xfer_is_dma_mapped(host, spi, transfer); 444 442 445 443 /* For poll mode just disable all interrupts */ 446 444 dw_spi_mask_intr(dws, 0xff);
+2 -2
drivers/spi/spi-ingenic.c
··· 16 16 #include <linux/platform_device.h> 17 17 #include <linux/regmap.h> 18 18 #include <linux/spi/spi.h> 19 + #include "internals.h" 19 20 20 21 #define REG_SSIDR 0x0 21 22 #define REG_SSICR0 0x4 ··· 243 242 { 244 243 struct ingenic_spi *priv = spi_controller_get_devdata(ctlr); 245 244 unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word; 246 - bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer); 247 245 248 246 spi_ingenic_prepare_transfer(priv, spi, xfer); 249 247 250 - if (ctlr->cur_msg_mapped && can_dma) 248 + if (spi_xfer_is_dma_mapped(ctlr, spi, xfer)) 251 249 return spi_ingenic_dma_tx(ctlr, xfer, bits); 252 250 253 251 if (bits > 16)
+4 -4
drivers/spi/spi-omap2-mcspi.c
··· 27 27 28 28 #include <linux/spi/spi.h> 29 29 30 + #include "internals.h" 31 + 30 32 #include <linux/platform_data/spi-omap2-mcspi.h> 31 33 32 34 #define OMAP2_MCSPI_MAX_FREQ 48000000 ··· 1210 1208 unsigned count; 1211 1209 1212 1210 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1213 - ctlr->cur_msg_mapped && 1214 - ctlr->can_dma(ctlr, spi, t)) 1211 + spi_xfer_is_dma_mapped(ctlr, spi, t)) 1215 1212 omap2_mcspi_set_fifo(spi, t, 1); 1216 1213 1217 1214 omap2_mcspi_set_enable(spi, 1); ··· 1221 1220 + OMAP2_MCSPI_TX0); 1222 1221 1223 1222 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1224 - ctlr->cur_msg_mapped && 1225 - ctlr->can_dma(ctlr, spi, t)) 1223 + spi_xfer_is_dma_mapped(ctlr, spi, t)) 1226 1224 count = omap2_mcspi_txrx_dma(spi, t); 1227 1225 else 1228 1226 count = omap2_mcspi_txrx_pio(spi, t);
+3 -2
drivers/spi/spi-pci1xxxx.c
··· 6 6 7 7 8 8 #include <linux/bitfield.h> 9 + #include <linux/delay.h> 9 10 #include <linux/dma-mapping.h> 10 11 #include <linux/iopoll.h> 11 12 #include <linux/irq.h> ··· 16 15 #include <linux/pci.h> 17 16 #include <linux/spinlock.h> 18 17 #include <linux/spi/spi.h> 19 - #include <linux/delay.h> 18 + #include "internals.h" 20 19 21 20 #define DRV_NAME "spi-pci1xxxx" 22 21 ··· 568 567 static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr, 569 568 struct spi_device *spi, struct spi_transfer *xfer) 570 569 { 571 - if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped) 570 + if (spi_xfer_is_dma_mapped(spi_ctlr, spi, xfer)) 572 571 return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer); 573 572 else 574 573 return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);
+2 -4
drivers/spi/spi-pxa2xx.c
··· 26 26 27 27 #include <linux/spi/spi.h> 28 28 29 + #include "internals.h" 29 30 #include "spi-pxa2xx.h" 30 31 31 32 #define TIMOUT_DFLT 1000 ··· 994 993 } 995 994 996 995 dma_thresh = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT); 997 - dma_mapped = controller->can_dma && 998 - controller->can_dma(controller, spi, transfer) && 999 - controller->cur_msg_mapped; 996 + dma_mapped = spi_xfer_is_dma_mapped(controller, spi, transfer); 1000 997 if (dma_mapped) { 1001 - 1002 998 /* Ensure we have the correct interrupt handler */ 1003 999 drv_data->transfer_handler = pxa2xx_spi_dma_transfer; 1004 1000
+4 -5
drivers/spi/spi-qup.c
··· 5 5 6 6 #include <linux/clk.h> 7 7 #include <linux/delay.h> 8 + #include <linux/dma-mapping.h> 9 + #include <linux/dmaengine.h> 8 10 #include <linux/err.h> 9 11 #include <linux/interconnect.h> 10 12 #include <linux/interrupt.h> ··· 18 16 #include <linux/pm_opp.h> 19 17 #include <linux/pm_runtime.h> 20 18 #include <linux/spi/spi.h> 21 - #include <linux/dmaengine.h> 22 - #include <linux/dma-mapping.h> 19 + #include "internals.h" 23 20 24 21 #define QUP_CONFIG 0x0000 25 22 #define QUP_STATE 0x0004 ··· 710 709 711 710 if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32))) 712 711 controller->mode = QUP_IO_M_MODE_FIFO; 713 - else if (spi->controller->can_dma && 714 - spi->controller->can_dma(spi->controller, spi, xfer) && 715 - spi->controller->cur_msg_mapped) 712 + else if (spi_xfer_is_dma_mapped(spi->controller, spi, xfer)) 716 713 controller->mode = QUP_IO_M_MODE_BAM; 717 714 else 718 715 controller->mode = QUP_IO_M_MODE_BLOCK;
+27 -46
drivers/spi/spi.c
··· 1220 1220 spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0); 1221 1221 } 1222 1222 1223 - /* Dummy SG for unidirect transfers */ 1224 - static struct scatterlist dummy_sg = { 1225 - .page_link = SG_END, 1226 - }; 1227 - 1228 1223 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 1229 1224 { 1230 1225 struct device *tx_dev, *rx_dev; ··· 1258 1263 attrs); 1259 1264 if (ret != 0) 1260 1265 return ret; 1261 - } else { 1262 - xfer->tx_sg.sgl = &dummy_sg; 1266 + 1267 + xfer->tx_sg_mapped = true; 1263 1268 } 1264 1269 1265 1270 if (xfer->rx_buf != NULL) { ··· 1273 1278 1274 1279 return ret; 1275 1280 } 1276 - } else { 1277 - xfer->rx_sg.sgl = &dummy_sg; 1281 + 1282 + xfer->rx_sg_mapped = true; 1278 1283 } 1279 1284 } 1280 1285 /* No transfer has been mapped, bail out with success */ ··· 1283 1288 1284 1289 ctlr->cur_rx_dma_dev = rx_dev; 1285 1290 ctlr->cur_tx_dma_dev = tx_dev; 1286 - ctlr->cur_msg_mapped = true; 1287 1291 1288 1292 return 0; 1289 1293 } ··· 1293 1299 struct device *tx_dev = ctlr->cur_tx_dma_dev; 1294 1300 struct spi_transfer *xfer; 1295 1301 1296 - if (!ctlr->cur_msg_mapped || !ctlr->can_dma) 1297 - return 0; 1298 - 1299 1302 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1300 1303 /* The sync has already been done after each transfer. 
*/ 1301 1304 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; 1302 1305 1303 - if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 1304 - continue; 1306 + if (xfer->rx_sg_mapped) 1307 + spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, 1308 + DMA_FROM_DEVICE, attrs); 1309 + xfer->rx_sg_mapped = false; 1305 1310 1306 - spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, 1307 - DMA_FROM_DEVICE, attrs); 1308 - spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg, 1309 - DMA_TO_DEVICE, attrs); 1311 + if (xfer->tx_sg_mapped) 1312 + spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg, 1313 + DMA_TO_DEVICE, attrs); 1314 + xfer->tx_sg_mapped = false; 1310 1315 } 1311 - 1312 - ctlr->cur_msg_mapped = false; 1313 1316 1314 1317 return 0; 1315 1318 } 1316 1319 1317 - static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg, 1320 + static void spi_dma_sync_for_device(struct spi_controller *ctlr, 1318 1321 struct spi_transfer *xfer) 1319 1322 { 1320 1323 struct device *rx_dev = ctlr->cur_rx_dma_dev; 1321 1324 struct device *tx_dev = ctlr->cur_tx_dma_dev; 1322 1325 1323 - if (!ctlr->cur_msg_mapped) 1324 - return; 1325 - 1326 - if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 1327 - return; 1328 - 1329 - dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 1330 - dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 1326 + if (xfer->tx_sg_mapped) 1327 + dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 1328 + if (xfer->rx_sg_mapped) 1329 + dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 1331 1330 } 1332 1331 1333 - static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg, 1332 + static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, 1334 1333 struct spi_transfer *xfer) 1335 1334 { 1336 1335 struct device *rx_dev = ctlr->cur_rx_dma_dev; 1337 1336 struct device *tx_dev = ctlr->cur_tx_dma_dev; 1338 1337 1339 - if (!ctlr->cur_msg_mapped) 1340 - return; 1341 - 1342 - if (!ctlr->can_dma(ctlr, 
msg->spi, xfer)) 1343 - return; 1344 - 1345 - dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 1346 - dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 1338 + if (xfer->rx_sg_mapped) 1339 + dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 1340 + if (xfer->tx_sg_mapped) 1341 + dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 1347 1342 } 1348 1343 #else /* !CONFIG_HAS_DMA */ 1349 1344 static inline int __spi_map_msg(struct spi_controller *ctlr, ··· 1348 1365 } 1349 1366 1350 1367 static void spi_dma_sync_for_device(struct spi_controller *ctrl, 1351 - struct spi_message *msg, 1352 1368 struct spi_transfer *xfer) 1353 1369 { 1354 1370 } 1355 1371 1356 1372 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl, 1357 - struct spi_message *msg, 1358 1373 struct spi_transfer *xfer) 1359 1374 { 1360 1375 } ··· 1624 1643 reinit_completion(&ctlr->xfer_completion); 1625 1644 1626 1645 fallback_pio: 1627 - spi_dma_sync_for_device(ctlr, msg, xfer); 1646 + spi_dma_sync_for_device(ctlr, xfer); 1628 1647 ret = ctlr->transfer_one(ctlr, msg->spi, xfer); 1629 1648 if (ret < 0) { 1630 - spi_dma_sync_for_cpu(ctlr, msg, xfer); 1649 + spi_dma_sync_for_cpu(ctlr, xfer); 1631 1650 1632 - if (ctlr->cur_msg_mapped && 1633 - (xfer->error & SPI_TRANS_FAIL_NO_START)) { 1651 + if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) && 1652 + (xfer->error & SPI_TRANS_FAIL_NO_START)) { 1634 1653 __spi_unmap_msg(ctlr, msg); 1635 1654 ctlr->fallback = true; 1636 1655 xfer->error &= ~SPI_TRANS_FAIL_NO_START; ··· 1652 1671 msg->status = ret; 1653 1672 } 1654 1673 1655 - spi_dma_sync_for_cpu(ctlr, msg, xfer); 1674 + spi_dma_sync_for_cpu(ctlr, xfer); 1656 1675 } else { 1657 1676 if (xfer->len) 1658 1677 dev_err(&msg->spi->dev,
+7 -4
include/linux/spi/spi.h
··· 447 447 * @cur_msg_need_completion: Flag used internally to opportunistically skip 448 448 * the @cur_msg_completion. This flag is used to signal the context that 449 449 * is running spi_finalize_current_message() that it needs to complete() 450 - * @cur_msg_mapped: message has been mapped for DMA 451 450 * @fallback: fallback to PIO if DMA transfer return failure with 452 451 * SPI_TRANS_FAIL_NO_START. 453 452 * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. ··· 707 708 bool running; 708 709 bool rt; 709 710 bool auto_runtime_pm; 710 - bool cur_msg_mapped; 711 711 bool fallback; 712 712 bool last_cs_mode_high; 713 713 s8 last_cs[SPI_CS_CNT_MAX]; ··· 979 981 * transfer this transfer. Set to 0 if the SPI bus driver does 980 982 * not support it. 981 983 * @transfer_list: transfers are sequenced through @spi_message.transfers 984 + * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA 985 + * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA 982 986 * @tx_sg: Scatterlist for transmit, currently not for client use 983 987 * @rx_sg: Scatterlist for receive, currently not for client use 984 988 * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset ··· 1077 1077 #define SPI_TRANS_FAIL_IO BIT(1) 1078 1078 u16 error; 1079 1079 1080 - dma_addr_t tx_dma; 1081 - dma_addr_t rx_dma; 1080 + bool tx_sg_mapped; 1081 + bool rx_sg_mapped; 1082 + 1082 1083 struct sg_table tx_sg; 1083 1084 struct sg_table rx_sg; 1085 + dma_addr_t tx_dma; 1086 + dma_addr_t rx_dma; 1084 1087 1085 1088 unsigned dummy_data:1; 1086 1089 unsigned cs_off:1;