Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: wwan: t7xx: Runtime PM

Enables the runtime power management callbacks, including runtime_suspend
and runtime_resume. Autosuspend is used to avoid the overhead caused by
frequent wake-ups.

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Eliot Lee <eliot.lee@intel.com>
Signed-off-by: Eliot Lee <eliot.lee@intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Haijun Liu and committed by
David S. Miller
d10b3a69 46e8f49e

+68
+14
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
··· 33 33 #include <linux/list.h> 34 34 #include <linux/netdevice.h> 35 35 #include <linux/pci.h> 36 + #include <linux/pm_runtime.h> 36 37 #include <linux/sched.h> 37 38 #include <linux/skbuff.h> 38 39 #include <linux/slab.h> ··· 252 251 t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); 253 252 t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); 254 253 t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); 254 + pm_runtime_mark_last_busy(md_ctrl->dev); 255 + pm_runtime_put_autosuspend(md_ctrl->dev); 255 256 } 256 257 257 258 static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) ··· 363 360 t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); 364 361 } 365 362 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 363 + 364 + pm_runtime_mark_last_busy(md_ctrl->dev); 365 + pm_runtime_put_autosuspend(md_ctrl->dev); 366 366 } 367 367 368 368 static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, ··· 574 568 if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { 575 569 for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { 576 570 if (i < CLDMA_TXQ_NUM) { 571 + pm_runtime_get(md_ctrl->dev); 577 572 t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); 578 573 t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); 579 574 queue_work(md_ctrl->txq[i].worker, ··· 599 592 if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { 600 593 l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; 601 594 for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { 595 + pm_runtime_get(md_ctrl->dev); 602 596 t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); 603 597 t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); 604 598 queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); ··· 930 922 if (qno >= CLDMA_TXQ_NUM) 931 923 return -EINVAL; 932 924 925 + ret = pm_runtime_resume_and_get(md_ctrl->dev); 926 + if (ret < 0 && ret != -EACCES) 927 + return ret; 928 + 933 929 queue = &md_ctrl->txq[qno]; 934 930 935 931 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ··· 977 
965 } while (!ret); 978 966 979 967 allow_sleep: 968 + pm_runtime_mark_last_busy(md_ctrl->dev); 969 + pm_runtime_put_autosuspend(md_ctrl->dev); 980 970 return ret; 981 971 } 982 972
+17
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
··· 32 32 #include <linux/minmax.h> 33 33 #include <linux/mm.h> 34 34 #include <linux/netdevice.h> 35 + #include <linux/pm_runtime.h> 35 36 #include <linux/sched.h> 36 37 #include <linux/skbuff.h> 37 38 #include <linux/slab.h> ··· 911 910 { 912 911 struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); 913 912 struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; 913 + int ret; 914 914 915 915 atomic_set(&rxq->rx_processing, 1); 916 916 /* Ensure rx_processing is changed to 1 before actually begin RX flow */ ··· 923 921 return; 924 922 } 925 923 924 + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); 925 + if (ret < 0 && ret != -EACCES) 926 + return; 927 + 926 928 t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); 929 + 930 + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 931 + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 927 932 atomic_set(&rxq->rx_processing, 0); 928 933 } 929 934 ··· 1132 1123 { 1133 1124 struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work); 1134 1125 struct dpmaif_rx_queue *rxq; 1126 + int ret; 1127 + 1128 + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); 1129 + if (ret < 0 && ret != -EACCES) 1130 + return; 1135 1131 1136 1132 /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ 1137 1133 rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; 1138 1134 t7xx_dpmaif_bat_release_and_add(rxq); 1139 1135 t7xx_dpmaif_frag_bat_release_and_add(rxq); 1136 + 1137 + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 1138 + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 1140 1139 } 1141 1140 1142 1141 int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
+15
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
··· 28 28 #include <linux/list.h> 29 29 #include <linux/minmax.h> 30 30 #include <linux/netdevice.h> 31 + #include <linux/pm_runtime.h> 31 32 #include <linux/sched.h> 32 33 #include <linux/spinlock.h> 33 34 #include <linux/skbuff.h> ··· 162 161 struct dpmaif_hw_info *hw_info; 163 162 int ret; 164 163 164 + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); 165 + if (ret < 0 && ret != -EACCES) 166 + return; 167 + 165 168 hw_info = &dpmaif_ctrl->hw_info; 166 169 ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); 167 170 if (ret == -EAGAIN || ··· 179 174 t7xx_dpmaif_clr_ip_busy_sts(hw_info); 180 175 t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); 181 176 } 177 + 178 + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 179 + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 182 180 } 183 181 184 182 static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, ··· 431 423 static int t7xx_dpmaif_tx_hw_push_thread(void *arg) 432 424 { 433 425 struct dpmaif_ctrl *dpmaif_ctrl = arg; 426 + int ret; 434 427 435 428 while (!kthread_should_stop()) { 436 429 if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) || ··· 446 437 break; 447 438 } 448 439 440 + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); 441 + if (ret < 0 && ret != -EACCES) 442 + return ret; 443 + 449 444 t7xx_do_tx_hw_push(dpmaif_ctrl); 445 + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 446 + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 450 447 } 451 448 452 449 return 0;
+22
drivers/net/wwan/t7xx/t7xx_pci.c
··· 31 31 #include <linux/mutex.h> 32 32 #include <linux/pci.h> 33 33 #include <linux/pm.h> 34 + #include <linux/pm_runtime.h> 34 35 #include <linux/pm_wakeup.h> 35 36 36 37 #include "t7xx_mhccif.h" ··· 45 44 #define T7XX_PCI_EREG_BASE 2 46 45 47 46 #define PM_ACK_TIMEOUT_MS 1500 47 + #define PM_AUTOSUSPEND_MS 20000 48 48 #define PM_RESOURCE_POLL_TIMEOUT_US 10000 49 49 #define PM_RESOURCE_POLL_STEP_US 100 50 50 ··· 84 82 DPM_FLAG_NO_DIRECT_COMPLETE); 85 83 86 84 iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); 85 + pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); 86 + pm_runtime_use_autosuspend(&pdev->dev); 87 87 88 88 return t7xx_wait_pm_config(t7xx_dev); 89 89 } ··· 100 96 D2H_INT_RESUME_ACK_AP); 101 97 iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 102 98 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); 99 + 100 + pm_runtime_put_noidle(&t7xx_dev->pdev->dev); 103 101 } 104 102 105 103 static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) ··· 110 104 * so just roll back PM setting to the init setting. 
111 105 */ 112 106 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); 107 + 108 + pm_runtime_get_noresume(&t7xx_dev->pdev->dev); 109 + 113 110 iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); 114 111 return t7xx_wait_pm_config(t7xx_dev); 115 112 } ··· 412 403 t7xx_dev->rgu_pci_irq_en = true; 413 404 t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); 414 405 iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 406 + pm_runtime_mark_last_busy(&pdev->dev); 415 407 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); 416 408 417 409 return ret; ··· 449 439 return __t7xx_pci_pm_resume(to_pci_dev(dev), false); 450 440 } 451 441 442 + static int t7xx_pci_pm_runtime_suspend(struct device *dev) 443 + { 444 + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); 445 + } 446 + 447 + static int t7xx_pci_pm_runtime_resume(struct device *dev) 448 + { 449 + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); 450 + } 451 + 452 452 static const struct dev_pm_ops t7xx_pci_pm_ops = { 453 453 .suspend = t7xx_pci_pm_suspend, 454 454 .resume = t7xx_pci_pm_resume, ··· 468 448 .poweroff = t7xx_pci_pm_suspend, 469 449 .restore = t7xx_pci_pm_resume, 470 450 .restore_noirq = t7xx_pci_pm_resume_noirq, 451 + .runtime_suspend = t7xx_pci_pm_runtime_suspend, 452 + .runtime_resume = t7xx_pci_pm_runtime_resume 471 453 }; 472 454 473 455 static int t7xx_request_irq(struct pci_dev *pdev)