Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: qcom-bam-dma: Add pm_runtime support

Adds pm_runtime support for BAM DMA so that clock is enabled only
when there is a transaction going on to help save power.

Signed-off-by: Pramod Gurav <pramod.gurav@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Authored by Pramod Gurav; committed by Vinod Koul.
7d254559 1a695a90

+109 -1
drivers/dma/qcom/bam_dma.c
··· 48 48 #include <linux/of_dma.h> 49 49 #include <linux/clk.h> 50 50 #include <linux/dmaengine.h> 51 + #include <linux/pm_runtime.h> 51 52 52 53 #include "../dmaengine.h" 53 54 #include "../virt-dma.h" ··· 58 57 __le16 size; /* Buffer size in bytes */ 59 58 __le16 flags; 60 59 }; 60 + 61 + #define BAM_DMA_AUTOSUSPEND_DELAY 100 61 62 62 63 #define DESC_FLAG_INT BIT(15) 63 64 #define DESC_FLAG_EOT BIT(14) ··· 530 527 struct bam_device *bdev = bchan->bdev; 531 528 u32 val; 532 529 unsigned long flags; 530 + int ret; 531 + 532 + ret = pm_runtime_get_sync(bdev->dev); 533 + if (ret < 0) 534 + return; 533 535 534 536 vchan_free_chan_resources(to_virt_chan(chan)); 535 537 536 538 if (bchan->curr_txd) { 537 539 dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); 538 - return; 540 + goto err; 539 541 } 540 542 541 543 spin_lock_irqsave(&bchan->vc.lock, flags); ··· 558 550 559 551 /* disable irq */ 560 552 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); 553 + 554 + err: 555 + pm_runtime_mark_last_busy(bdev->dev); 556 + pm_runtime_put_autosuspend(bdev->dev); 561 557 } 562 558 563 559 /** ··· 708 696 struct bam_chan *bchan = to_bam_chan(chan); 709 697 struct bam_device *bdev = bchan->bdev; 710 698 unsigned long flag; 699 + int ret; 700 + 701 + ret = pm_runtime_get_sync(bdev->dev); 702 + if (ret < 0) 703 + return ret; 711 704 712 705 spin_lock_irqsave(&bchan->vc.lock, flag); 713 706 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); 714 707 bchan->paused = 1; 715 708 spin_unlock_irqrestore(&bchan->vc.lock, flag); 709 + pm_runtime_mark_last_busy(bdev->dev); 710 + pm_runtime_put_autosuspend(bdev->dev); 716 711 717 712 return 0; 718 713 } ··· 734 715 struct bam_chan *bchan = to_bam_chan(chan); 735 716 struct bam_device *bdev = bchan->bdev; 736 717 unsigned long flag; 718 + int ret; 719 + 720 + ret = pm_runtime_get_sync(bdev->dev); 721 + if (ret < 0) 722 + return ret; 737 723 738 724 spin_lock_irqsave(&bchan->vc.lock, flag); 739 725 writel_relaxed(0, 
bam_addr(bdev, bchan->id, BAM_P_HALT)); 740 726 bchan->paused = 0; 741 727 spin_unlock_irqrestore(&bchan->vc.lock, flag); 728 + pm_runtime_mark_last_busy(bdev->dev); 729 + pm_runtime_put_autosuspend(bdev->dev); 742 730 743 731 return 0; 744 732 } ··· 821 795 { 822 796 struct bam_device *bdev = data; 823 797 u32 clr_mask = 0, srcs = 0; 798 + int ret; 824 799 825 800 srcs |= process_channel_irqs(bdev); 826 801 827 802 /* kick off tasklet to start next dma transfer */ 828 803 if (srcs & P_IRQ) 829 804 tasklet_schedule(&bdev->task); 805 + 806 + ret = pm_runtime_get_sync(bdev->dev); 807 + if (ret < 0) 808 + return ret; 830 809 831 810 if (srcs & BAM_IRQ) { 832 811 clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); ··· 844 813 845 814 writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); 846 815 } 816 + 817 + pm_runtime_mark_last_busy(bdev->dev); 818 + pm_runtime_put_autosuspend(bdev->dev); 847 819 848 820 return IRQ_HANDLED; 849 821 } ··· 927 893 struct bam_desc_hw *desc; 928 894 struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, 929 895 sizeof(struct bam_desc_hw)); 896 + int ret; 930 897 931 898 lockdep_assert_held(&bchan->vc.lock); 932 899 ··· 938 903 939 904 async_desc = container_of(vd, struct bam_async_desc, vd); 940 905 bchan->curr_txd = async_desc; 906 + 907 + ret = pm_runtime_get_sync(bdev->dev); 908 + if (ret < 0) 909 + return; 941 910 942 911 /* on first use, initialize the channel hardware */ 943 912 if (!bchan->initialized) ··· 985 946 wmb(); 986 947 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), 987 948 bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); 949 + 950 + pm_runtime_mark_last_busy(bdev->dev); 951 + pm_runtime_put_autosuspend(bdev->dev); 988 952 } 989 953 990 954 /** ··· 1012 970 bam_start_dma(bchan); 1013 971 spin_unlock_irqrestore(&bchan->vc.lock, flags); 1014 972 } 973 + 1015 974 } 1016 975 1017 976 /** ··· 1256 1213 if (ret) 1257 1214 goto err_unregister_dma; 1258 1215 1216 + pm_runtime_irq_safe(&pdev->dev); 1217 + 
pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); 1218 + pm_runtime_use_autosuspend(&pdev->dev); 1219 + pm_runtime_mark_last_busy(&pdev->dev); 1220 + pm_runtime_set_active(&pdev->dev); 1221 + pm_runtime_enable(&pdev->dev); 1222 + 1259 1223 return 0; 1260 1224 1261 1225 err_unregister_dma: ··· 1282 1232 { 1283 1233 struct bam_device *bdev = platform_get_drvdata(pdev); 1284 1234 u32 i; 1235 + 1236 + pm_runtime_force_suspend(&pdev->dev); 1285 1237 1286 1238 of_dma_controller_free(pdev->dev.of_node); 1287 1239 dma_async_device_unregister(&bdev->common); ··· 1312 1260 return 0; 1313 1261 } 1314 1262 1263 + static int bam_dma_runtime_suspend(struct device *dev) 1264 + { 1265 + struct bam_device *bdev = dev_get_drvdata(dev); 1266 + 1267 + clk_disable(bdev->bamclk); 1268 + 1269 + return 0; 1270 + } 1271 + 1272 + static int bam_dma_runtime_resume(struct device *dev) 1273 + { 1274 + struct bam_device *bdev = dev_get_drvdata(dev); 1275 + int ret; 1276 + 1277 + ret = clk_enable(bdev->bamclk); 1278 + if (ret < 0) { 1279 + dev_err(dev, "clk_enable failed: %d\n", ret); 1280 + return ret; 1281 + } 1282 + 1283 + return 0; 1284 + } 1285 + #ifdef CONFIG_PM_SLEEP 1286 + static int bam_dma_suspend(struct device *dev) 1287 + { 1288 + struct bam_device *bdev = dev_get_drvdata(dev); 1289 + 1290 + pm_runtime_force_suspend(dev); 1291 + 1292 + clk_unprepare(bdev->bamclk); 1293 + 1294 + return 0; 1295 + } 1296 + 1297 + static int bam_dma_resume(struct device *dev) 1298 + { 1299 + struct bam_device *bdev = dev_get_drvdata(dev); 1300 + int ret; 1301 + 1302 + ret = clk_prepare(bdev->bamclk); 1303 + if (ret) 1304 + return ret; 1305 + 1306 + pm_runtime_force_resume(dev); 1307 + 1308 + return 0; 1309 + } 1310 + #endif 1311 + 1312 + static const struct dev_pm_ops bam_dma_pm_ops = { 1313 + SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume) 1314 + SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume, 1315 + NULL) 1316 + }; 1317 + 1315 1318 static struct 
platform_driver bam_dma_driver = { 1316 1319 .probe = bam_dma_probe, 1317 1320 .remove = bam_dma_remove, 1318 1321 .driver = { 1319 1322 .name = "bam-dma-engine", 1323 + .pm = &bam_dma_pm_ops, 1320 1324 .of_match_table = bam_of_match, 1321 1325 }, 1322 1326 };