Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: xilinx_dma: Add Xilinx AXI MCDMA Engine driver support

Add support for the AXI Multichannel Direct Memory Access (AXI MCDMA)
core, which is a soft Xilinx IP core that provides high-bandwidth
direct memory access between memory and AXI4-Stream target peripherals.
The AXI MCDMA core provides a scatter-gather interface with multiple
independent transmit and receive channels. The driver supports the
device_prep_slave_sg slave transfer mode.

Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Link: https://lore.kernel.org/r/1571763622-29281-7-git-send-email-radhey.shyam.pandey@xilinx.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Radhey Shyam Pandey and committed by
Vinod Koul
6ccd692b c2f6b67d

+455 -9
+4
drivers/dma/Kconfig
··· 655 655 destination address. 656 656 AXI DMA engine provides high-bandwidth one dimensional direct 657 657 memory access between memory and AXI4-Stream target peripherals. 658 + AXI MCDMA engine provides high-bandwidth direct memory access 659 + between memory and AXI4-Stream target peripherals. It provides 660 + the scatter gather interface with multiple channels independent 661 + configuration support. 658 662 659 663 config XILINX_ZYNQMP_DMA 660 664 tristate "Xilinx ZynqMP DMA Engine"
+451 -9
drivers/dma/xilinx/xilinx_dma.c
··· 25 25 * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory 26 26 * Access (DMA) between a memory-mapped source address and a memory-mapped 27 27 * destination address. 28 + * 29 + * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft 30 + * Xilinx IP that provides high-bandwidth direct memory access between 31 + * memory and AXI4-Stream target peripherals. It provides scatter gather 32 + * (SG) interface with multiple channels independent configuration support. 33 + * 28 34 */ 29 35 30 36 #include <linux/bitops.h> ··· 122 116 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) 123 117 124 118 /* HW specific definitions */ 125 - #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 119 + #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 126 120 127 121 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ 128 122 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ ··· 185 179 186 180 #define xilinx_prep_dma_addr_t(addr) \ 187 181 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) 182 + 183 + /* AXI MCDMA Specific Registers/Offsets */ 184 + #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000 185 + #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500 186 + #define XILINX_MCDMA_CHEN_OFFSET 0x0008 187 + #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010 188 + #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020 189 + #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028 190 + #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40) 191 + #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40) 192 + #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40) 193 + #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40) 194 + 195 + /* AXI MCDMA Specific Masks/Shifts */ 196 + #define XILINX_MCDMA_COALESCE_SHIFT 16 197 + #define XILINX_MCDMA_COALESCE_MAX 24 198 + #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5) 199 + #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16) 200 + #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0) 201 + #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5) 202 + #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6) 
203 + #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7) 204 + #define XILINX_MCDMA_BD_EOP BIT(30) 205 + #define XILINX_MCDMA_BD_SOP BIT(31) 206 + 188 207 /** 189 208 * struct xilinx_vdma_desc_hw - Hardware Descriptor 190 209 * @next_desc: Next Descriptor Pointer @0x00 ··· 256 225 } __aligned(64); 257 226 258 227 /** 228 + * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA 229 + * @next_desc: Next Descriptor Pointer @0x00 230 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 231 + * @buf_addr: Buffer address @0x08 232 + * @buf_addr_msb: MSB of Buffer address @0x0C 233 + * @rsvd: Reserved field @0x10 234 + * @control: Control Information field @0x14 235 + * @status: Status field @0x18 236 + * @sideband_status: Status of sideband signals @0x1C 237 + * @app: APP Fields @0x20 - 0x30 238 + */ 239 + struct xilinx_aximcdma_desc_hw { 240 + u32 next_desc; 241 + u32 next_desc_msb; 242 + u32 buf_addr; 243 + u32 buf_addr_msb; 244 + u32 rsvd; 245 + u32 control; 246 + u32 status; 247 + u32 sideband_status; 248 + u32 app[XILINX_DMA_NUM_APP_WORDS]; 249 + } __aligned(64); 250 + 251 + /** 259 252 * struct xilinx_cdma_desc_hw - Hardware Descriptor 260 253 * @next_desc: Next Descriptor Pointer @0x00 261 254 * @next_desc_msb: Next Descriptor Pointer MSB @0x04 ··· 321 266 */ 322 267 struct xilinx_axidma_tx_segment { 323 268 struct xilinx_axidma_desc_hw hw; 269 + struct list_head node; 270 + dma_addr_t phys; 271 + } __aligned(64); 272 + 273 + /** 274 + * struct xilinx_aximcdma_tx_segment - Descriptor segment 275 + * @hw: Hardware descriptor 276 + * @node: Node in the descriptor segments list 277 + * @phys: Physical address of segment 278 + */ 279 + struct xilinx_aximcdma_tx_segment { 280 + struct xilinx_aximcdma_desc_hw hw; 324 281 struct list_head node; 325 282 dma_addr_t phys; 326 283 } __aligned(64); ··· 396 329 * @ext_addr: Indicates 64 bit addressing is supported by dma channel 397 330 * @desc_submitcount: Descriptor h/w submitted count 398 331 * @seg_v: Statically 
allocated segments base 332 + * @seg_mv: Statically allocated segments base for MCDMA 399 333 * @seg_p: Physical allocated segments base 400 334 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers 401 335 * @cyclic_seg_p: Physical allocated segments base for cyclic dma 402 336 * @start_transfer: Differentiate b/w DMA IP's transfer 403 337 * @stop_transfer: Differentiate b/w DMA IP's quiesce 338 + * @tdest: TDEST value for mcdma 404 339 * @has_vflip: S2MM vertical flip 405 340 */ 406 341 struct xilinx_dma_chan { ··· 433 364 bool ext_addr; 434 365 u32 desc_submitcount; 435 366 struct xilinx_axidma_tx_segment *seg_v; 367 + struct xilinx_aximcdma_tx_segment *seg_mv; 436 368 dma_addr_t seg_p; 437 369 struct xilinx_axidma_tx_segment *cyclic_seg_v; 438 370 dma_addr_t cyclic_seg_p; 439 371 void (*start_transfer)(struct xilinx_dma_chan *chan); 440 372 int (*stop_transfer)(struct xilinx_dma_chan *chan); 373 + u16 tdest; 441 374 bool has_vflip; 442 375 }; 443 376 ··· 449 378 * @XDMA_TYPE_AXIDMA: Axi dma ip. 450 379 * @XDMA_TYPE_CDMA: Axi cdma ip. 451 380 * @XDMA_TYPE_VDMA: Axi vdma ip. 381 + * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip. 
452 382 * 453 383 */ 454 384 enum xdma_ip_type { 455 385 XDMA_TYPE_AXIDMA = 0, 456 386 XDMA_TYPE_CDMA, 457 387 XDMA_TYPE_VDMA, 388 + XDMA_TYPE_AXIMCDMA 458 389 }; 459 390 460 391 struct xilinx_dma_config { ··· 485 412 * @nr_channels: Number of channels DMA device supports 486 413 * @chan_id: DMA channel identifier 487 414 * @max_buffer_len: Max buffer length 415 + * @s2mm_index: S2MM channel index 488 416 */ 489 417 struct xilinx_dma_device { 490 418 void __iomem *regs; ··· 504 430 u32 nr_channels; 505 431 u32 chan_id; 506 432 u32 max_buffer_len; 433 + u32 s2mm_index; 507 434 }; 508 435 509 436 /* Macros */ ··· 605 530 } 606 531 } 607 532 533 + static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan, 534 + struct xilinx_aximcdma_desc_hw *hw, 535 + dma_addr_t buf_addr, size_t sg_used) 536 + { 537 + if (chan->ext_addr) { 538 + hw->buf_addr = lower_32_bits(buf_addr + sg_used); 539 + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used); 540 + } else { 541 + hw->buf_addr = buf_addr + sg_used; 542 + } 543 + } 544 + 608 545 /* ----------------------------------------------------------------------------- 609 546 * Descriptors and segments alloc and free 610 547 */ ··· 690 603 return segment; 691 604 } 692 605 606 + /** 607 + * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment 608 + * @chan: Driver specific DMA channel 609 + * 610 + * Return: The allocated segment on success and NULL on failure. 
611 + */ 612 + static struct xilinx_aximcdma_tx_segment * 613 + xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 614 + { 615 + struct xilinx_aximcdma_tx_segment *segment = NULL; 616 + unsigned long flags; 617 + 618 + spin_lock_irqsave(&chan->lock, flags); 619 + if (!list_empty(&chan->free_seg_list)) { 620 + segment = list_first_entry(&chan->free_seg_list, 621 + struct xilinx_aximcdma_tx_segment, 622 + node); 623 + list_del(&segment->node); 624 + } 625 + spin_unlock_irqrestore(&chan->lock, flags); 626 + 627 + return segment; 628 + } 629 + 693 630 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) 694 631 { 695 632 u32 next_desc = hw->next_desc; 696 633 u32 next_desc_msb = hw->next_desc_msb; 697 634 698 635 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw)); 636 + 637 + hw->next_desc = next_desc; 638 + hw->next_desc_msb = next_desc_msb; 639 + } 640 + 641 + static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw) 642 + { 643 + u32 next_desc = hw->next_desc; 644 + u32 next_desc_msb = hw->next_desc_msb; 645 + 646 + memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw)); 699 647 700 648 hw->next_desc = next_desc; 701 649 hw->next_desc_msb = next_desc_msb; ··· 745 623 struct xilinx_axidma_tx_segment *segment) 746 624 { 747 625 xilinx_dma_clean_hw_desc(&segment->hw); 626 + 627 + list_add_tail(&segment->node, &chan->free_seg_list); 628 + } 629 + 630 + /** 631 + * xilinx_mcdma_free_tx_segment - Free transaction segment 632 + * @chan: Driver specific DMA channel 633 + * @segment: DMA transaction segment 634 + */ 635 + static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan, 636 + struct xilinx_aximcdma_tx_segment * 637 + segment) 638 + { 639 + xilinx_mcdma_clean_hw_desc(&segment->hw); 748 640 749 641 list_add_tail(&segment->node, &chan->free_seg_list); 750 642 } ··· 817 681 struct xilinx_vdma_tx_segment *segment, *next; 818 682 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; 819 683 struct 
xilinx_axidma_tx_segment *axidma_segment, *axidma_next; 684 + struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next; 820 685 821 686 if (!desc) 822 687 return; ··· 833 696 list_del(&cdma_segment->node); 834 697 xilinx_cdma_free_tx_segment(chan, cdma_segment); 835 698 } 836 - } else { 699 + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 837 700 list_for_each_entry_safe(axidma_segment, axidma_next, 838 701 &desc->segments, node) { 839 702 list_del(&axidma_segment->node); 840 703 xilinx_dma_free_tx_segment(chan, axidma_segment); 704 + } 705 + } else { 706 + list_for_each_entry_safe(aximcdma_segment, aximcdma_next, 707 + &desc->segments, node) { 708 + list_del(&aximcdma_segment->node); 709 + xilinx_mcdma_free_tx_segment(chan, aximcdma_segment); 841 710 } 842 711 } 843 712 ··· 913 770 chan->cyclic_seg_v, chan->cyclic_seg_p); 914 771 } 915 772 916 - if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { 773 + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 774 + spin_lock_irqsave(&chan->lock, flags); 775 + INIT_LIST_HEAD(&chan->free_seg_list); 776 + spin_unlock_irqrestore(&chan->lock, flags); 777 + 778 + /* Free memory that is allocated for BD */ 779 + dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) * 780 + XILINX_DMA_NUM_DESCS, chan->seg_mv, 781 + chan->seg_p); 782 + } 783 + 784 + if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA && 785 + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) { 917 786 dma_pool_destroy(chan->desc_pool); 918 787 chan->desc_pool = NULL; 919 788 } 789 + 920 790 } 921 791 922 792 /** ··· 1111 955 list_add_tail(&chan->seg_v[i].node, 1112 956 &chan->free_seg_list); 1113 957 } 958 + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 959 + /* Allocate the buffer descriptors. 
*/ 960 + chan->seg_mv = dma_alloc_coherent(chan->dev, 961 + sizeof(*chan->seg_mv) * 962 + XILINX_DMA_NUM_DESCS, 963 + &chan->seg_p, GFP_KERNEL); 964 + if (!chan->seg_mv) { 965 + dev_err(chan->dev, 966 + "unable to allocate channel %d descriptors\n", 967 + chan->id); 968 + return -ENOMEM; 969 + } 970 + for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { 971 + chan->seg_mv[i].hw.next_desc = 972 + lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * 973 + ((i + 1) % XILINX_DMA_NUM_DESCS)); 974 + chan->seg_mv[i].hw.next_desc_msb = 975 + upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * 976 + ((i + 1) % XILINX_DMA_NUM_DESCS)); 977 + chan->seg_mv[i].phys = chan->seg_p + 978 + sizeof(*chan->seg_v) * i; 979 + list_add_tail(&chan->seg_mv[i].node, 980 + &chan->free_seg_list); 981 + } 1114 982 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 1115 983 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 1116 984 chan->dev, ··· 1150 970 } 1151 971 1152 972 if (!chan->desc_pool && 1153 - (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { 973 + ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) && 974 + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) { 1154 975 dev_err(chan->dev, 1155 976 "unable to allocate channel %d descriptor pool\n", 1156 977 chan->id); ··· 1549 1368 } 1550 1369 1551 1370 /** 1371 + * xilinx_mcdma_start_transfer - Starts MCDMA transfer 1372 + * @chan: Driver specific channel struct pointer 1373 + */ 1374 + static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) 1375 + { 1376 + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 1377 + struct xilinx_axidma_tx_segment *tail_segment; 1378 + u32 reg; 1379 + 1380 + /* 1381 + * lock has been held by calling functions, so we don't need it 1382 + * to take it here again. 
1383 + */ 1384 + 1385 + if (chan->err) 1386 + return; 1387 + 1388 + if (!chan->idle) 1389 + return; 1390 + 1391 + if (list_empty(&chan->pending_list)) 1392 + return; 1393 + 1394 + head_desc = list_first_entry(&chan->pending_list, 1395 + struct xilinx_dma_tx_descriptor, node); 1396 + tail_desc = list_last_entry(&chan->pending_list, 1397 + struct xilinx_dma_tx_descriptor, node); 1398 + tail_segment = list_last_entry(&tail_desc->segments, 1399 + struct xilinx_axidma_tx_segment, node); 1400 + 1401 + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); 1402 + 1403 + if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) { 1404 + reg &= ~XILINX_MCDMA_COALESCE_MASK; 1405 + reg |= chan->desc_pendingcount << 1406 + XILINX_MCDMA_COALESCE_SHIFT; 1407 + } 1408 + 1409 + reg |= XILINX_MCDMA_IRQ_ALL_MASK; 1410 + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); 1411 + 1412 + /* Program current descriptor */ 1413 + xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), 1414 + head_desc->async_tx.phys); 1415 + 1416 + /* Program channel enable register */ 1417 + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET); 1418 + reg |= BIT(chan->tdest); 1419 + dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg); 1420 + 1421 + /* Start the fetch of BDs for the channel */ 1422 + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); 1423 + reg |= XILINX_MCDMA_CR_RUNSTOP_MASK; 1424 + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); 1425 + 1426 + xilinx_dma_start(chan); 1427 + 1428 + if (chan->err) 1429 + return; 1430 + 1431 + /* Start the transfer */ 1432 + xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest), 1433 + tail_segment->phys); 1434 + 1435 + list_splice_tail_init(&chan->pending_list, &chan->active_list); 1436 + chan->desc_pendingcount = 0; 1437 + chan->idle = false; 1438 + } 1439 + 1440 + /** 1552 1441 * xilinx_dma_issue_pending - Issue pending transactions 1553 1442 * @dchan: DMA channel 1554 
1443 */ ··· 1714 1463 XILINX_DMA_DMAXR_ALL_IRQ_MASK); 1715 1464 1716 1465 return 0; 1466 + } 1467 + 1468 + /** 1469 + * xilinx_mcdma_irq_handler - MCDMA Interrupt handler 1470 + * @irq: IRQ number 1471 + * @data: Pointer to the Xilinx MCDMA channel structure 1472 + * 1473 + * Return: IRQ_HANDLED/IRQ_NONE 1474 + */ 1475 + static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data) 1476 + { 1477 + struct xilinx_dma_chan *chan = data; 1478 + u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id; 1479 + 1480 + if (chan->direction == DMA_DEV_TO_MEM) 1481 + ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET; 1482 + else 1483 + ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET; 1484 + 1485 + /* Read the channel id raising the interrupt*/ 1486 + chan_sermask = dma_ctrl_read(chan, ser_offset); 1487 + chan_id = ffs(chan_sermask); 1488 + 1489 + if (!chan_id) 1490 + return IRQ_NONE; 1491 + 1492 + if (chan->direction == DMA_DEV_TO_MEM) 1493 + chan_offset = chan->xdev->s2mm_index; 1494 + 1495 + chan_offset = chan_offset + (chan_id - 1); 1496 + chan = chan->xdev->chan[chan_offset]; 1497 + /* Read the status and ack the interrupts. */ 1498 + status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest)); 1499 + if (!(status & XILINX_MCDMA_IRQ_ALL_MASK)) 1500 + return IRQ_NONE; 1501 + 1502 + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest), 1503 + status & XILINX_MCDMA_IRQ_ALL_MASK); 1504 + 1505 + if (status & XILINX_MCDMA_IRQ_ERR_MASK) { 1506 + dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n", 1507 + chan, 1508 + dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET), 1509 + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET 1510 + (chan->tdest)), 1511 + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET 1512 + (chan->tdest))); 1513 + chan->err = true; 1514 + } 1515 + 1516 + if (status & XILINX_MCDMA_IRQ_DELAY_MASK) { 1517 + /* 1518 + * Device takes too long to do the transfer when user requires 1519 + * responsiveness. 
1520 + */ 1521 + dev_dbg(chan->dev, "Inter-packet latency too long\n"); 1522 + } 1523 + 1524 + if (status & XILINX_MCDMA_IRQ_IOC_MASK) { 1525 + spin_lock(&chan->lock); 1526 + xilinx_dma_complete_descriptor(chan); 1527 + chan->idle = true; 1528 + chan->start_transfer(chan); 1529 + spin_unlock(&chan->lock); 1530 + } 1531 + 1532 + tasklet_schedule(&chan->tasklet); 1533 + return IRQ_HANDLED; 1717 1534 } 1718 1535 1719 1536 /** ··· 2291 1972 } 2292 1973 2293 1974 /** 1975 + * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 1976 + * @dchan: DMA channel 1977 + * @sgl: scatterlist to transfer to/from 1978 + * @sg_len: number of entries in @scatterlist 1979 + * @direction: DMA direction 1980 + * @flags: transfer ack flags 1981 + * @context: APP words of the descriptor 1982 + * 1983 + * Return: Async transaction descriptor on success and NULL on failure 1984 + */ 1985 + static struct dma_async_tx_descriptor * 1986 + xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, 1987 + unsigned int sg_len, 1988 + enum dma_transfer_direction direction, 1989 + unsigned long flags, void *context) 1990 + { 1991 + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 1992 + struct xilinx_dma_tx_descriptor *desc; 1993 + struct xilinx_aximcdma_tx_segment *segment = NULL; 1994 + u32 *app_w = (u32 *)context; 1995 + struct scatterlist *sg; 1996 + size_t copy; 1997 + size_t sg_used; 1998 + unsigned int i; 1999 + 2000 + if (!is_slave_direction(direction)) 2001 + return NULL; 2002 + 2003 + /* Allocate a transaction descriptor. 
*/ 2004 + desc = xilinx_dma_alloc_tx_descriptor(chan); 2005 + if (!desc) 2006 + return NULL; 2007 + 2008 + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2009 + desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2010 + 2011 + /* Build transactions using information in the scatter gather list */ 2012 + for_each_sg(sgl, sg, sg_len, i) { 2013 + sg_used = 0; 2014 + 2015 + /* Loop until the entire scatterlist entry is used */ 2016 + while (sg_used < sg_dma_len(sg)) { 2017 + struct xilinx_aximcdma_desc_hw *hw; 2018 + 2019 + /* Get a free segment */ 2020 + segment = xilinx_aximcdma_alloc_tx_segment(chan); 2021 + if (!segment) 2022 + goto error; 2023 + 2024 + /* 2025 + * Calculate the maximum number of bytes to transfer, 2026 + * making sure it is less than the hw limit 2027 + */ 2028 + copy = min_t(size_t, sg_dma_len(sg) - sg_used, 2029 + chan->xdev->max_buffer_len); 2030 + hw = &segment->hw; 2031 + 2032 + /* Fill in the descriptor */ 2033 + xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg), 2034 + sg_used); 2035 + hw->control = copy; 2036 + 2037 + if (chan->direction == DMA_MEM_TO_DEV && app_w) { 2038 + memcpy(hw->app, app_w, sizeof(u32) * 2039 + XILINX_DMA_NUM_APP_WORDS); 2040 + } 2041 + 2042 + sg_used += copy; 2043 + /* 2044 + * Insert the segment into the descriptor segments 2045 + * list. 
2046 + */ 2047 + list_add_tail(&segment->node, &desc->segments); 2048 + } 2049 + } 2050 + 2051 + segment = list_first_entry(&desc->segments, 2052 + struct xilinx_aximcdma_tx_segment, node); 2053 + desc->async_tx.phys = segment->phys; 2054 + 2055 + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2056 + if (chan->direction == DMA_MEM_TO_DEV) { 2057 + segment->hw.control |= XILINX_MCDMA_BD_SOP; 2058 + segment = list_last_entry(&desc->segments, 2059 + struct xilinx_aximcdma_tx_segment, 2060 + node); 2061 + segment->hw.control |= XILINX_MCDMA_BD_EOP; 2062 + } 2063 + 2064 + return &desc->async_tx; 2065 + 2066 + error: 2067 + xilinx_dma_free_tx_descriptor(chan, desc); 2068 + 2069 + return NULL; 2070 + } 2071 + 2072 + /** 2294 2073 * xilinx_dma_terminate_all - Halt the channel and free descriptors 2295 2074 * @dchan: Driver specific DMA Channel pointer 2296 2075 * ··· 2780 2363 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { 2781 2364 chan->direction = DMA_MEM_TO_DEV; 2782 2365 chan->id = chan_id; 2366 + chan->tdest = chan_id; 2783 2367 2784 2368 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 2785 2369 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { ··· 2797 2379 "xlnx,axi-dma-s2mm-channel")) { 2798 2380 chan->direction = DMA_DEV_TO_MEM; 2799 2381 chan->id = chan_id; 2382 + xdev->s2mm_index = xdev->nr_channels; 2383 + chan->tdest = chan_id - xdev->nr_channels; 2800 2384 chan->has_vflip = of_property_read_bool(node, 2801 2385 "xlnx,enable-vert-flip"); 2802 2386 if (chan->has_vflip) { ··· 2807 2387 XILINX_VDMA_ENABLE_VERTICAL_FLIP; 2808 2388 } 2809 2389 2810 - chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 2390 + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) 2391 + chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET; 2392 + else 2393 + chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 2394 + 2811 2395 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2812 2396 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 2813 2397 chan->config.park = 1; ··· 
2826 2402 } 2827 2403 2828 2404 /* Request the interrupt */ 2829 - chan->irq = irq_of_parse_and_map(node, 0); 2405 + chan->irq = irq_of_parse_and_map(node, chan->tdest); 2830 2406 err = request_irq(chan->irq, xdev->dma_config->irq_handler, 2831 2407 IRQF_SHARED, "xilinx-dma-controller", chan); 2832 2408 if (err) { ··· 2836 2412 2837 2413 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2838 2414 chan->start_transfer = xilinx_dma_start_transfer; 2415 + chan->stop_transfer = xilinx_dma_stop_transfer; 2416 + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 2417 + chan->start_transfer = xilinx_mcdma_start_transfer; 2839 2418 chan->stop_transfer = xilinx_dma_stop_transfer; 2840 2419 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 2841 2420 chan->start_transfer = xilinx_cdma_start_transfer; ··· 2893 2466 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 2894 2467 struct device_node *node) 2895 2468 { 2896 - int i, nr_channels = 1; 2469 + int ret, i, nr_channels = 1; 2470 + 2471 + ret = of_property_read_u32(node, "dma-channels", &nr_channels); 2472 + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) 2473 + dev_warn(xdev->dev, "missing dma-channels property\n"); 2897 2474 2898 2475 for (i = 0; i < nr_channels; i++) 2899 2476 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); ··· 2932 2501 .irq_handler = xilinx_dma_irq_handler, 2933 2502 }; 2934 2503 2504 + static const struct xilinx_dma_config aximcdma_config = { 2505 + .dmatype = XDMA_TYPE_AXIMCDMA, 2506 + .clk_init = axidma_clk_init, 2507 + .irq_handler = xilinx_mcdma_irq_handler, 2508 + }; 2935 2509 static const struct xilinx_dma_config axicdma_config = { 2936 2510 .dmatype = XDMA_TYPE_CDMA, 2937 2511 .clk_init = axicdma_clk_init, ··· 2953 2517 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, 2954 2518 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, 2955 2519 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, 
2520 + { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config }, 2956 2521 {} 2957 2522 }; 2958 2523 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); ··· 3004 2567 /* Retrieve the DMA engine properties from the device tree */ 3005 2568 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); 3006 2569 3007 - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2570 + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA || 2571 + xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 3008 2572 if (!of_property_read_u32(node, "xlnx,sg-length-width", 3009 2573 &len_width)) { 3010 2574 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || ··· 3078 2640 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; 3079 2641 /* Residue calculation is supported by only AXI DMA and CDMA */ 3080 2642 xdev->common.residue_granularity = 3081 - DMA_RESIDUE_GRANULARITY_SEGMENT; 2643 + DMA_RESIDUE_GRANULARITY_SEGMENT; 2644 + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { 2645 + xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg; 3082 2646 } else { 3083 2647 xdev->common.device_prep_interleaved_dma = 3084 2648 xilinx_vdma_dma_prep_interleaved; ··· 3116 2676 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); 3117 2677 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) 3118 2678 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); 2679 + else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) 2680 + dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n"); 3119 2681 else 3120 2682 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); 3121 2683