Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: owl: Add Slave and Cyclic mode support for Actions Semi Owl S900 SoC

Add Slave and Cyclic mode support for the Actions Semi Owl S900 SoC. Slave
mode supports a bus width of 4 bytes, common to all peripherals, and a bus
width of 1 byte, specific to UART.

The cyclic mode supports only block mode transfer.

Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Manivannan Sadhasivam and committed by Vinod Koul.
d64e1b3f 5b394b2d

+272 -7
+272 -7
drivers/dma/owl-dma.c
··· 21 21 #include <linux/mm.h> 22 22 #include <linux/module.h> 23 23 #include <linux/of_device.h> 24 + #include <linux/of_dma.h> 24 25 #include <linux/slab.h> 25 26 #include "virt-dma.h" 26 27 ··· 166 165 struct owl_dma_txd { 167 166 struct virt_dma_desc vd; 168 167 struct list_head lli_list; 168 + bool cyclic; 169 169 }; 170 170 171 171 /** ··· 193 191 struct virt_dma_chan vc; 194 192 struct owl_dma_pchan *pchan; 195 193 struct owl_dma_txd *txd; 194 + struct dma_slave_config cfg; 195 + u8 drq; 196 196 }; 197 197 198 198 /** ··· 340 336 341 337 static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd, 342 338 struct owl_dma_lli *prev, 343 - struct owl_dma_lli *next) 339 + struct owl_dma_lli *next, 340 + bool is_cyclic) 344 341 { 345 - list_add_tail(&next->node, &txd->lli_list); 342 + if (!is_cyclic) 343 + list_add_tail(&next->node, &txd->lli_list); 346 344 347 345 if (prev) { 348 346 prev->hw.next_lli = next->phys; ··· 357 351 static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan, 358 352 struct owl_dma_lli *lli, 359 353 dma_addr_t src, dma_addr_t dst, 360 - u32 len, enum dma_transfer_direction dir) 354 + u32 len, enum dma_transfer_direction dir, 355 + struct dma_slave_config *sconfig, 356 + bool is_cyclic) 361 357 { 362 358 struct owl_dma_lli_hw *hw = &lli->hw; 363 359 u32 mode; ··· 371 363 mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU | 372 364 OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC | 373 365 OWL_DMA_MODE_DAM_INC; 366 + 367 + break; 368 + case DMA_MEM_TO_DEV: 369 + mode |= OWL_DMA_MODE_TS(vchan->drq) 370 + | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV 371 + | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST; 372 + 373 + /* 374 + * Hardware only supports 32bit and 8bit buswidth. Since the 375 + * default is 32bit, select 8bit only when requested. 
376 + */ 377 + if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) 378 + mode |= OWL_DMA_MODE_NDDBW_8BIT; 379 + 380 + break; 381 + case DMA_DEV_TO_MEM: 382 + mode |= OWL_DMA_MODE_TS(vchan->drq) 383 + | OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU 384 + | OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC; 385 + 386 + /* 387 + * Hardware only supports 32bit and 8bit buswidth. Since the 388 + * default is 32bit, select 8bit only when requested. 389 + */ 390 + if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) 391 + mode |= OWL_DMA_MODE_NDDBW_8BIT; 374 392 375 393 break; 376 394 default: ··· 415 381 OWL_DMA_LLC_SAV_LOAD_NEXT | 416 382 OWL_DMA_LLC_DAV_LOAD_NEXT); 417 383 418 - hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK); 384 + if (is_cyclic) 385 + hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK); 386 + else 387 + hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK); 419 388 420 389 return 0; 421 390 } ··· 480 443 spin_unlock_irqrestore(&od->lock, flags); 481 444 } 482 445 446 + static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan) 447 + { 448 + pchan_writel(pchan, 1, OWL_DMAX_PAUSE); 449 + } 450 + 451 + static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan) 452 + { 453 + pchan_writel(pchan, 0, OWL_DMAX_PAUSE); 454 + } 455 + 483 456 static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan) 484 457 { 485 458 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); ··· 511 464 lli = list_first_entry(&txd->lli_list, 512 465 struct owl_dma_lli, node); 513 466 514 - int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK; 467 + if (txd->cyclic) 468 + int_ctl = OWL_DMA_INTCTL_BLOCK; 469 + else 470 + int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK; 515 471 516 472 pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME); 517 473 pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL, ··· 677 627 return 0; 678 628 } 679 629 630 + static int owl_dma_config(struct dma_chan *chan, 631 + struct dma_slave_config *config) 632 + { 633 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 634 + 635 + /* 
Reject definitely invalid configurations */ 636 + if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 637 + config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 638 + return -EINVAL; 639 + 640 + memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config)); 641 + 642 + return 0; 643 + } 644 + 645 + static int owl_dma_pause(struct dma_chan *chan) 646 + { 647 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 648 + unsigned long flags; 649 + 650 + spin_lock_irqsave(&vchan->vc.lock, flags); 651 + 652 + owl_dma_pause_pchan(vchan->pchan); 653 + 654 + spin_unlock_irqrestore(&vchan->vc.lock, flags); 655 + 656 + return 0; 657 + } 658 + 659 + static int owl_dma_resume(struct dma_chan *chan) 660 + { 661 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 662 + unsigned long flags; 663 + 664 + if (!vchan->pchan && !vchan->txd) 665 + return 0; 666 + 667 + dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); 668 + 669 + spin_lock_irqsave(&vchan->vc.lock, flags); 670 + 671 + owl_dma_resume_pchan(vchan->pchan); 672 + 673 + spin_unlock_irqrestore(&vchan->vc.lock, flags); 674 + 675 + return 0; 676 + } 677 + 680 678 static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan) 681 679 { 682 680 struct owl_dma_pchan *pchan; ··· 852 754 bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH); 853 755 854 756 ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset, 855 - bytes, DMA_MEM_TO_MEM); 757 + bytes, DMA_MEM_TO_MEM, 758 + &vchan->cfg, txd->cyclic); 856 759 if (ret) { 857 760 dev_warn(chan2dev(chan), "failed to config lli\n"); 858 761 goto err_txd_free; 859 762 } 860 763 861 - prev = owl_dma_add_lli(txd, prev, lli); 764 + prev = owl_dma_add_lli(txd, prev, lli, false); 862 765 } 863 766 864 767 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 865 768 866 769 err_txd_free: 867 770 owl_dma_free_txd(od, txd); 771 + return NULL; 772 + } 773 + 774 + static struct dma_async_tx_descriptor 775 + *owl_dma_prep_slave_sg(struct dma_chan *chan, 776 + struct 
scatterlist *sgl, 777 + unsigned int sg_len, 778 + enum dma_transfer_direction dir, 779 + unsigned long flags, void *context) 780 + { 781 + struct owl_dma *od = to_owl_dma(chan->device); 782 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 783 + struct dma_slave_config *sconfig = &vchan->cfg; 784 + struct owl_dma_txd *txd; 785 + struct owl_dma_lli *lli, *prev = NULL; 786 + struct scatterlist *sg; 787 + dma_addr_t addr, src = 0, dst = 0; 788 + size_t len; 789 + int ret, i; 790 + 791 + txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 792 + if (!txd) 793 + return NULL; 794 + 795 + INIT_LIST_HEAD(&txd->lli_list); 796 + 797 + for_each_sg(sgl, sg, sg_len, i) { 798 + addr = sg_dma_address(sg); 799 + len = sg_dma_len(sg); 800 + 801 + if (len > OWL_DMA_FRAME_MAX_LENGTH) { 802 + dev_err(od->dma.dev, 803 + "frame length exceeds max supported length"); 804 + goto err_txd_free; 805 + } 806 + 807 + lli = owl_dma_alloc_lli(od); 808 + if (!lli) { 809 + dev_err(chan2dev(chan), "failed to allocate lli"); 810 + goto err_txd_free; 811 + } 812 + 813 + if (dir == DMA_MEM_TO_DEV) { 814 + src = addr; 815 + dst = sconfig->dst_addr; 816 + } else { 817 + src = sconfig->src_addr; 818 + dst = addr; 819 + } 820 + 821 + ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig, 822 + txd->cyclic); 823 + if (ret) { 824 + dev_warn(chan2dev(chan), "failed to config lli"); 825 + goto err_txd_free; 826 + } 827 + 828 + prev = owl_dma_add_lli(txd, prev, lli, false); 829 + } 830 + 831 + return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 832 + 833 + err_txd_free: 834 + owl_dma_free_txd(od, txd); 835 + 836 + return NULL; 837 + } 838 + 839 + static struct dma_async_tx_descriptor 840 + *owl_prep_dma_cyclic(struct dma_chan *chan, 841 + dma_addr_t buf_addr, size_t buf_len, 842 + size_t period_len, 843 + enum dma_transfer_direction dir, 844 + unsigned long flags) 845 + { 846 + struct owl_dma *od = to_owl_dma(chan->device); 847 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 848 + struct dma_slave_config 
*sconfig = &vchan->cfg; 849 + struct owl_dma_txd *txd; 850 + struct owl_dma_lli *lli, *prev = NULL, *first = NULL; 851 + dma_addr_t src = 0, dst = 0; 852 + unsigned int periods = buf_len / period_len; 853 + int ret, i; 854 + 855 + txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 856 + if (!txd) 857 + return NULL; 858 + 859 + INIT_LIST_HEAD(&txd->lli_list); 860 + txd->cyclic = true; 861 + 862 + for (i = 0; i < periods; i++) { 863 + lli = owl_dma_alloc_lli(od); 864 + if (!lli) { 865 + dev_warn(chan2dev(chan), "failed to allocate lli"); 866 + goto err_txd_free; 867 + } 868 + 869 + if (dir == DMA_MEM_TO_DEV) { 870 + src = buf_addr + (period_len * i); 871 + dst = sconfig->dst_addr; 872 + } else if (dir == DMA_DEV_TO_MEM) { 873 + src = sconfig->src_addr; 874 + dst = buf_addr + (period_len * i); 875 + } 876 + 877 + ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len, 878 + dir, sconfig, txd->cyclic); 879 + if (ret) { 880 + dev_warn(chan2dev(chan), "failed to config lli"); 881 + goto err_txd_free; 882 + } 883 + 884 + if (!first) 885 + first = lli; 886 + 887 + prev = owl_dma_add_lli(txd, prev, lli, false); 888 + } 889 + 890 + /* close the cyclic list */ 891 + owl_dma_add_lli(txd, prev, first, true); 892 + 893 + return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 894 + 895 + err_txd_free: 896 + owl_dma_free_txd(od, txd); 897 + 868 898 return NULL; 869 899 } 870 900 ··· 1014 788 list_del(&vchan->vc.chan.device_node); 1015 789 tasklet_kill(&vchan->vc.task); 1016 790 } 791 + } 792 + 793 + static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec, 794 + struct of_dma *ofdma) 795 + { 796 + struct owl_dma *od = ofdma->of_dma_data; 797 + struct owl_dma_vchan *vchan; 798 + struct dma_chan *chan; 799 + u8 drq = dma_spec->args[0]; 800 + 801 + if (drq > od->nr_vchans) 802 + return NULL; 803 + 804 + chan = dma_get_any_slave_channel(&od->dma); 805 + if (!chan) 806 + return NULL; 807 + 808 + vchan = to_owl_vchan(chan); 809 + vchan->drq = drq; 810 + 811 + return chan; 1017 812 } 
1018 813 1019 814 static int owl_dma_probe(struct platform_device *pdev) ··· 1080 833 spin_lock_init(&od->lock); 1081 834 1082 835 dma_cap_set(DMA_MEMCPY, od->dma.cap_mask); 836 + dma_cap_set(DMA_SLAVE, od->dma.cap_mask); 837 + dma_cap_set(DMA_CYCLIC, od->dma.cap_mask); 1083 838 1084 839 od->dma.dev = &pdev->dev; 1085 840 od->dma.device_free_chan_resources = owl_dma_free_chan_resources; 1086 841 od->dma.device_tx_status = owl_dma_tx_status; 1087 842 od->dma.device_issue_pending = owl_dma_issue_pending; 1088 843 od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy; 844 + od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg; 845 + od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic; 846 + od->dma.device_config = owl_dma_config; 847 + od->dma.device_pause = owl_dma_pause; 848 + od->dma.device_resume = owl_dma_resume; 1089 849 od->dma.device_terminate_all = owl_dma_terminate_all; 1090 850 od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 1091 851 od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); ··· 1164 910 goto err_pool_free; 1165 911 } 1166 912 913 + /* Device-tree DMA controller registration */ 914 + ret = of_dma_controller_register(pdev->dev.of_node, 915 + owl_dma_of_xlate, od); 916 + if (ret) { 917 + dev_err(&pdev->dev, "of_dma_controller_register failed\n"); 918 + goto err_dma_unregister; 919 + } 920 + 1167 921 return 0; 1168 922 923 + err_dma_unregister: 924 + dma_async_device_unregister(&od->dma); 1169 925 err_pool_free: 1170 926 clk_disable_unprepare(od->clk); 1171 927 dma_pool_destroy(od->lli_pool); ··· 1187 923 { 1188 924 struct owl_dma *od = platform_get_drvdata(pdev); 1189 925 926 + of_dma_controller_free(pdev->dev.of_node); 1190 927 dma_async_device_unregister(&od->dma); 1191 928 1192 929 /* Mask all interrupts for this execution environment */