Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: qcom: bam_dma: fix runtime PM underflow

Commit dbad41e7bb5f ("dmaengine: qcom: bam_dma: check if the runtime pm enabled")
caused unbalanced pm_runtime_get()/put() calls when the BAM is
controlled remotely. This commit reverts it and simply enables pm_runtime
in all cases; the clk_* functions are already no-ops when the clock is NULL.

Also clean up a bit by removing unnecessary bamclk NULL checks.

Suggested-by: Stephan Gerhold <stephan@gerhold.net>
Fixes: dbad41e7bb5f ("dmaengine: qcom: bam_dma: check if the runtime pm enabled")
Signed-off-by: Caleb Connolly <caleb.connolly@linaro.org>
Link: https://lore.kernel.org/r/20220629140559.118537-1-caleb.connolly@linaro.org
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Caleb Connolly and committed by
Vinod Koul
0ac9c3dd a7cd3cf0

+11 -28
+11 -28
drivers/dma/qcom/bam_dma.c
··· 558 558 return 0; 559 559 } 560 560 561 - static int bam_pm_runtime_get_sync(struct device *dev) 562 - { 563 - if (pm_runtime_enabled(dev)) 564 - return pm_runtime_get_sync(dev); 565 - 566 - return 0; 567 - } 568 - 569 561 /** 570 562 * bam_free_chan - Frees dma resources associated with specific channel 571 563 * @chan: specified channel ··· 573 581 unsigned long flags; 574 582 int ret; 575 583 576 - ret = bam_pm_runtime_get_sync(bdev->dev); 584 + ret = pm_runtime_get_sync(bdev->dev); 577 585 if (ret < 0) 578 586 return; 579 587 ··· 776 784 unsigned long flag; 777 785 int ret; 778 786 779 - ret = bam_pm_runtime_get_sync(bdev->dev); 787 + ret = pm_runtime_get_sync(bdev->dev); 780 788 if (ret < 0) 781 789 return ret; 782 790 ··· 802 810 unsigned long flag; 803 811 int ret; 804 812 805 - ret = bam_pm_runtime_get_sync(bdev->dev); 813 + ret = pm_runtime_get_sync(bdev->dev); 806 814 if (ret < 0) 807 815 return ret; 808 816 ··· 911 919 if (srcs & P_IRQ) 912 920 tasklet_schedule(&bdev->task); 913 921 914 - ret = bam_pm_runtime_get_sync(bdev->dev); 922 + ret = pm_runtime_get_sync(bdev->dev); 915 923 if (ret < 0) 916 924 return IRQ_NONE; 917 925 ··· 1029 1037 if (!vd) 1030 1038 return; 1031 1039 1032 - ret = bam_pm_runtime_get_sync(bdev->dev); 1040 + ret = pm_runtime_get_sync(bdev->dev); 1033 1041 if (ret < 0) 1034 1042 return; 1035 1043 ··· 1366 1374 if (ret) 1367 1375 goto err_unregister_dma; 1368 1376 1369 - if (!bdev->bamclk) { 1370 - pm_runtime_disable(&pdev->dev); 1371 - return 0; 1372 - } 1373 - 1374 1377 pm_runtime_irq_safe(&pdev->dev); 1375 1378 pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); 1376 1379 pm_runtime_use_autosuspend(&pdev->dev); ··· 1449 1462 { 1450 1463 struct bam_device *bdev = dev_get_drvdata(dev); 1451 1464 1452 - if (bdev->bamclk) { 1453 - pm_runtime_force_suspend(dev); 1454 - clk_unprepare(bdev->bamclk); 1455 - } 1465 + pm_runtime_force_suspend(dev); 1466 + clk_unprepare(bdev->bamclk); 1456 1467 1457 1468 return 0; 
1458 1469 } ··· 1460 1475 struct bam_device *bdev = dev_get_drvdata(dev); 1461 1476 int ret; 1462 1477 1463 - if (bdev->bamclk) { 1464 - ret = clk_prepare(bdev->bamclk); 1465 - if (ret) 1466 - return ret; 1478 + ret = clk_prepare(bdev->bamclk); 1479 + if (ret) 1480 + return ret; 1467 1481 1468 - pm_runtime_force_resume(dev); 1469 - } 1482 + pm_runtime_force_resume(dev); 1470 1483 1471 1484 return 0; 1472 1485 }