Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: wwan: t7xx: Introduce power management

Implements suspend, resume, freeze, thaw, poweroff, and restore
`dev_pm_ops` callbacks.

From the host point of view, the t7xx driver is one entity. But, the
device has several modules that need to be addressed in different ways
during power management (PM) flows.
The driver uses the term 'PM entities' to refer to the 2 DPMA and
2 CLDMA HW blocks that need to be managed during PM flows.
When a dev_pm_ops function is called, the PM entities list is iterated
and the matching function is called for each entry in the list.

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Haijun Liu and committed by
David S. Miller
46e8f49e 05d19bf5

+700 -1
+122 -1
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
··· 1076 1076 return 0; 1077 1077 } 1078 1078 1079 + static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param) 1080 + { 1081 + struct cldma_ctrl *md_ctrl = entity_param; 1082 + struct t7xx_cldma_hw *hw_info; 1083 + unsigned long flags; 1084 + int qno_t; 1085 + 1086 + hw_info = &md_ctrl->hw_info; 1087 + 1088 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 1089 + t7xx_cldma_hw_restore(hw_info); 1090 + for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) { 1091 + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr, 1092 + MTK_TX); 1093 + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr, 1094 + MTK_RX); 1095 + } 1096 + t7xx_cldma_enable_irq(md_ctrl); 1097 + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); 1098 + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; 1099 + t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX); 1100 + t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); 1101 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 1102 + } 1103 + 1104 + static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param) 1105 + { 1106 + struct cldma_ctrl *md_ctrl = entity_param; 1107 + unsigned long flags; 1108 + 1109 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 1110 + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; 1111 + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); 1112 + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); 1113 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 1114 + 1115 + if (md_ctrl->hif_id == CLDMA_ID_MD) 1116 + t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK); 1117 + 1118 + return 0; 1119 + } 1120 + 1121 + static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param) 1122 + { 1123 + struct cldma_ctrl *md_ctrl = entity_param; 1124 + struct t7xx_cldma_hw *hw_info; 1125 + unsigned long flags; 1126 + 1127 + hw_info = &md_ctrl->hw_info; 1128 + 1129 + 
spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 1130 + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX); 1131 + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); 1132 + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; 1133 + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); 1134 + t7xx_cldma_clear_ip_busy(hw_info); 1135 + t7xx_cldma_disable_irq(md_ctrl); 1136 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 1137 + } 1138 + 1139 + static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param) 1140 + { 1141 + struct cldma_ctrl *md_ctrl = entity_param; 1142 + struct t7xx_cldma_hw *hw_info; 1143 + unsigned long flags; 1144 + 1145 + if (md_ctrl->hif_id == CLDMA_ID_MD) 1146 + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); 1147 + 1148 + hw_info = &md_ctrl->hw_info; 1149 + 1150 + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 1151 + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX); 1152 + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX); 1153 + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; 1154 + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); 1155 + md_ctrl->txq_started = 0; 1156 + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); 1157 + 1158 + return 0; 1159 + } 1160 + 1161 + static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl) 1162 + { 1163 + md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL); 1164 + if (!md_ctrl->pm_entity) 1165 + return -ENOMEM; 1166 + 1167 + md_ctrl->pm_entity->entity_param = md_ctrl; 1168 + 1169 + if (md_ctrl->hif_id == CLDMA_ID_MD) 1170 + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1; 1171 + else 1172 + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2; 1173 + 1174 + md_ctrl->pm_entity->suspend = t7xx_cldma_suspend; 1175 + md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late; 1176 + md_ctrl->pm_entity->resume = t7xx_cldma_resume; 1177 + md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early; 1178 + 1179 + return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity); 
1180 + } 1181 + 1182 + static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl) 1183 + { 1184 + if (!md_ctrl->pm_entity) 1185 + return -EINVAL; 1186 + 1187 + t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity); 1188 + kfree(md_ctrl->pm_entity); 1189 + md_ctrl->pm_entity = NULL; 1190 + return 0; 1191 + } 1192 + 1079 1193 void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) 1080 1194 { 1081 1195 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; ··· 1240 1126 * t7xx_cldma_init() - Initialize CLDMA. 1241 1127 * @md_ctrl: CLDMA context structure. 1242 1128 * 1129 + * Allocate and initialize device power management entity. 1243 1130 * Initialize HIF TX/RX queue structure. 1244 1131 * Register CLDMA callback ISR with PCIe driver. 1245 1132 * ··· 1251 1136 int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) 1252 1137 { 1253 1138 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 1254 - int i; 1139 + int ret, i; 1255 1140 1256 1141 md_ctrl->txq_active = 0; 1257 1142 md_ctrl->rxq_active = 0; 1258 1143 md_ctrl->is_late_init = false; 1144 + 1145 + ret = t7xx_cldma_pm_init(md_ctrl); 1146 + if (ret) 1147 + return ret; 1259 1148 1260 1149 spin_lock_init(&md_ctrl->cldma_lock); 1261 1150 ··· 1295 1176 1296 1177 err_workqueue: 1297 1178 t7xx_cldma_destroy_wqs(md_ctrl); 1179 + t7xx_cldma_pm_uninit(md_ctrl); 1298 1180 return -ENOMEM; 1299 1181 } 1300 1182 ··· 1310 1190 t7xx_cldma_stop(md_ctrl); 1311 1191 t7xx_cldma_late_release(md_ctrl); 1312 1192 t7xx_cldma_destroy_wqs(md_ctrl); 1193 + t7xx_cldma_pm_uninit(md_ctrl); 1313 1194 }
+1
drivers/net/wwan/t7xx/t7xx_hif_cldma.h
··· 98 98 struct dma_pool *gpd_dmapool; 99 99 struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; 100 100 struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; 101 + struct md_pm_entity *pm_entity; 101 102 struct t7xx_cldma_hw hw_info; 102 103 bool is_late_init; 103 104 int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
+90
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
··· 398 398 return 0; 399 399 } 400 400 401 + static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param) 402 + { 403 + struct dpmaif_ctrl *dpmaif_ctrl = param; 404 + 405 + t7xx_dpmaif_tx_stop(dpmaif_ctrl); 406 + t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); 407 + t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); 408 + t7xx_dpmaif_disable_irq(dpmaif_ctrl); 409 + t7xx_dpmaif_rx_stop(dpmaif_ctrl); 410 + return 0; 411 + } 412 + 413 + static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl) 414 + { 415 + int qno; 416 + 417 + for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++) 418 + t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno); 419 + } 420 + 421 + static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl) 422 + { 423 + struct dpmaif_rx_queue *rxq; 424 + struct dpmaif_tx_queue *txq; 425 + unsigned int que_cnt; 426 + 427 + for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) { 428 + txq = &dpmaif_ctrl->txq[que_cnt]; 429 + txq->que_started = true; 430 + } 431 + 432 + for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) { 433 + rxq = &dpmaif_ctrl->rxq[que_cnt]; 434 + rxq->que_started = true; 435 + } 436 + } 437 + 438 + static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param) 439 + { 440 + struct dpmaif_ctrl *dpmaif_ctrl = param; 441 + 442 + if (!dpmaif_ctrl) 443 + return 0; 444 + 445 + t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl); 446 + t7xx_dpmaif_enable_irq(dpmaif_ctrl); 447 + t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl); 448 + t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info); 449 + wake_up(&dpmaif_ctrl->tx_wq); 450 + return 0; 451 + } 452 + 453 + static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl) 454 + { 455 + struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; 456 + int ret; 457 + 458 + INIT_LIST_HEAD(&dpmaif_pm_entity->entity); 459 + dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend; 460 + dpmaif_pm_entity->suspend_late = NULL; 461 + dpmaif_pm_entity->resume_early = 
NULL; 462 + dpmaif_pm_entity->resume = &t7xx_dpmaif_resume; 463 + dpmaif_pm_entity->id = PM_ENTITY_ID_DATA; 464 + dpmaif_pm_entity->entity_param = dpmaif_ctrl; 465 + 466 + ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); 467 + if (ret) 468 + dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); 469 + 470 + return ret; 471 + } 472 + 473 + static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl) 474 + { 475 + struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; 476 + int ret; 477 + 478 + ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); 479 + if (ret < 0) 480 + dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); 481 + 482 + return ret; 483 + } 484 + 401 485 int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state) 402 486 { 403 487 int ret = 0; ··· 545 461 dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base - 546 462 t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; 547 463 464 + ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl); 465 + if (ret) 466 + return NULL; 467 + 548 468 t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl); 549 469 t7xx_dpmaif_disable_irq(dpmaif_ctrl); 550 470 551 471 ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl); 552 472 if (ret) { 473 + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); 553 474 dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret); 554 475 return NULL; 555 476 } ··· 567 478 { 568 479 if (dpmaif_ctrl->dpmaif_sw_init_done) { 569 480 t7xx_dpmaif_stop(dpmaif_ctrl); 481 + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); 570 482 t7xx_dpmaif_sw_release(dpmaif_ctrl); 571 483 dpmaif_ctrl->dpmaif_sw_init_done = false; 572 484 }
+1
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
··· 174 174 struct dpmaif_ctrl { 175 175 struct device *dev; 176 176 struct t7xx_pci_dev *t7xx_dev; 177 + struct md_pm_entity dpmaif_pm_entity; 177 178 enum dpmaif_state state; 178 179 bool dpmaif_sw_init_done; 179 180 struct dpmaif_hw_info hw_info;
+17
drivers/net/wwan/t7xx/t7xx_mhccif.c
··· 24 24 #include "t7xx_pcie_mac.h" 25 25 #include "t7xx_reg.h" 26 26 27 + #define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \ 28 + D2H_INT_RESUME_ACK | \ 29 + D2H_INT_SUSPEND_ACK_AP | \ 30 + D2H_INT_RESUME_ACK_AP) 31 + 27 32 static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask) 28 33 { 29 34 void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; ··· 58 53 } 59 54 60 55 t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); 56 + 57 + if (int_status & D2H_INT_SR_ACK) 58 + complete(&t7xx_dev->pm_sr_ack); 59 + 60 + iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 61 + 62 + int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); 63 + if (!int_status) { 64 + val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); 65 + iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 66 + } 67 + 61 68 t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); 62 69 return IRQ_HANDLED; 63 70 }
+421
drivers/net/wwan/t7xx/t7xx_pci.c
··· 18 18 19 19 #include <linux/atomic.h> 20 20 #include <linux/bits.h> 21 + #include <linux/completion.h> 21 22 #include <linux/device.h> 22 23 #include <linux/dma-mapping.h> 23 24 #include <linux/gfp.h> 24 25 #include <linux/interrupt.h> 25 26 #include <linux/io.h> 27 + #include <linux/iopoll.h> 28 + #include <linux/jiffies.h> 29 + #include <linux/list.h> 26 30 #include <linux/module.h> 31 + #include <linux/mutex.h> 27 32 #include <linux/pci.h> 33 + #include <linux/pm.h> 34 + #include <linux/pm_wakeup.h> 28 35 29 36 #include "t7xx_mhccif.h" 30 37 #include "t7xx_modem_ops.h" 31 38 #include "t7xx_pci.h" 32 39 #include "t7xx_pcie_mac.h" 33 40 #include "t7xx_reg.h" 41 + #include "t7xx_state_monitor.h" 34 42 35 43 #define T7XX_PCI_IREG_BASE 0 36 44 #define T7XX_PCI_EREG_BASE 2 45 + 46 + #define PM_ACK_TIMEOUT_MS 1500 47 + #define PM_RESOURCE_POLL_TIMEOUT_US 10000 48 + #define PM_RESOURCE_POLL_STEP_US 100 49 + 50 + enum t7xx_pm_state { 51 + MTK_PM_EXCEPTION, 52 + MTK_PM_INIT, /* Device initialized, but handshake not completed */ 53 + MTK_PM_SUSPENDED, 54 + MTK_PM_RESUMED, 55 + }; 56 + 57 + static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) 58 + { 59 + int ret, val; 60 + 61 + ret = read_poll_timeout(ioread32, val, 62 + (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK, 63 + PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true, 64 + IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); 65 + if (ret == -ETIMEDOUT) 66 + dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n"); 67 + 68 + return ret; 69 + } 70 + 71 + static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) 72 + { 73 + struct pci_dev *pdev = t7xx_dev->pdev; 74 + 75 + INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); 76 + mutex_init(&t7xx_dev->md_pm_entity_mtx); 77 + init_completion(&t7xx_dev->pm_sr_ack); 78 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); 79 + 80 + device_init_wakeup(&pdev->dev, true); 81 + dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | 82 + 
DPM_FLAG_NO_DIRECT_COMPLETE); 83 + 84 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); 85 + 86 + return t7xx_wait_pm_config(t7xx_dev); 87 + } 88 + 89 + void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) 90 + { 91 + /* Enable the PCIe resource lock only after MD deep sleep is done */ 92 + t7xx_mhccif_mask_clr(t7xx_dev, 93 + D2H_INT_SUSPEND_ACK | 94 + D2H_INT_RESUME_ACK | 95 + D2H_INT_SUSPEND_ACK_AP | 96 + D2H_INT_RESUME_ACK_AP); 97 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 98 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); 99 + } 100 + 101 + static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) 102 + { 103 + /* The device is kept in FSM re-init flow 104 + * so just roll back PM setting to the init setting. 105 + */ 106 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); 107 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); 108 + return t7xx_wait_pm_config(t7xx_dev); 109 + } 110 + 111 + void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev) 112 + { 113 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); 114 + t7xx_wait_pm_config(t7xx_dev); 115 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION); 116 + } 117 + 118 + int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) 119 + { 120 + struct md_pm_entity *entity; 121 + 122 + mutex_lock(&t7xx_dev->md_pm_entity_mtx); 123 + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { 124 + if (entity->id == pm_entity->id) { 125 + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); 126 + return -EEXIST; 127 + } 128 + } 129 + 130 + list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities); 131 + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); 132 + return 0; 133 + } 134 + 135 + int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) 136 + { 137 + struct md_pm_entity *entity, *tmp_entity; 138 + 139 + 
mutex_lock(&t7xx_dev->md_pm_entity_mtx); 140 + list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) { 141 + if (entity->id == pm_entity->id) { 142 + list_del(&pm_entity->entity); 143 + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); 144 + return 0; 145 + } 146 + } 147 + 148 + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); 149 + 150 + return -ENXIO; 151 + } 152 + 153 + static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request) 154 + { 155 + unsigned long wait_ret; 156 + 157 + reinit_completion(&t7xx_dev->pm_sr_ack); 158 + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request); 159 + wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, 160 + msecs_to_jiffies(PM_ACK_TIMEOUT_MS)); 161 + if (!wait_ret) 162 + return -ETIMEDOUT; 163 + 164 + return 0; 165 + } 166 + 167 + static int __t7xx_pci_pm_suspend(struct pci_dev *pdev) 168 + { 169 + enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID; 170 + struct t7xx_pci_dev *t7xx_dev; 171 + struct md_pm_entity *entity; 172 + int ret; 173 + 174 + t7xx_dev = pci_get_drvdata(pdev); 175 + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { 176 + dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); 177 + return -EFAULT; 178 + } 179 + 180 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); 181 + ret = t7xx_wait_pm_config(t7xx_dev); 182 + if (ret) { 183 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 184 + return ret; 185 + } 186 + 187 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); 188 + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); 189 + t7xx_dev->rgu_pci_irq_en = false; 190 + 191 + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { 192 + if (!entity->suspend) 193 + continue; 194 + 195 + ret = entity->suspend(t7xx_dev, entity->entity_param); 196 + if (ret) { 197 + entity_id = entity->id; 198 + dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id); 199 + goto abort_suspend; 200 + } 201 + } 202 + 
203 + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ); 204 + if (ret) { 205 + dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret); 206 + goto abort_suspend; 207 + } 208 + 209 + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP); 210 + if (ret) { 211 + t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); 212 + dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret); 213 + goto abort_suspend; 214 + } 215 + 216 + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { 217 + if (entity->suspend_late) 218 + entity->suspend_late(t7xx_dev, entity->entity_param); 219 + } 220 + 221 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 222 + return 0; 223 + 224 + abort_suspend: 225 + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { 226 + if (entity_id == entity->id) 227 + break; 228 + 229 + if (entity->resume) 230 + entity->resume(t7xx_dev, entity->entity_param); 231 + } 232 + 233 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 234 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); 235 + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); 236 + return ret; 237 + } 238 + 239 + static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev) 240 + { 241 + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); 242 + 243 + /* Disable interrupt first and let the IPs enable them */ 244 + iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0); 245 + 246 + /* Device disables PCIe interrupts during resume and 247 + * following function will re-enable PCIe interrupts. 
248 + */ 249 + t7xx_pcie_mac_interrupts_en(t7xx_dev); 250 + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); 251 + } 252 + 253 + static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3) 254 + { 255 + int ret; 256 + 257 + ret = pcim_enable_device(t7xx_dev->pdev); 258 + if (ret) 259 + return ret; 260 + 261 + t7xx_pcie_mac_atr_init(t7xx_dev); 262 + t7xx_pcie_interrupt_reinit(t7xx_dev); 263 + 264 + if (is_d3) { 265 + t7xx_mhccif_init(t7xx_dev); 266 + return t7xx_pci_pm_reinit(t7xx_dev); 267 + } 268 + 269 + return 0; 270 + } 271 + 272 + static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event) 273 + { 274 + struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl; 275 + struct device *dev = &t7xx_dev->pdev->dev; 276 + int ret = -EINVAL; 277 + 278 + switch (event) { 279 + case FSM_CMD_STOP: 280 + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); 281 + break; 282 + 283 + case FSM_CMD_START: 284 + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); 285 + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); 286 + t7xx_dev->rgu_pci_irq_en = true; 287 + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); 288 + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0); 289 + break; 290 + 291 + default: 292 + break; 293 + } 294 + 295 + if (ret) 296 + dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret); 297 + 298 + return ret; 299 + } 300 + 301 + static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) 302 + { 303 + struct t7xx_pci_dev *t7xx_dev; 304 + struct md_pm_entity *entity; 305 + u32 prev_state; 306 + int ret = 0; 307 + 308 + t7xx_dev = pci_get_drvdata(pdev); 309 + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { 310 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 311 + return 0; 312 + } 313 + 314 + t7xx_pcie_mac_interrupts_en(t7xx_dev); 315 + prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE); 316 + 317 + if (state_check) { 318 + /* For D3/L3 
resume, the device could boot so quickly that the 319 + * initial value of the dummy register might be overwritten. 320 + * Identify new boots if the ATR source address register is not initialized. 321 + */ 322 + u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) + 323 + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR); 324 + if (prev_state == PM_RESUME_REG_STATE_L3 || 325 + (prev_state == PM_RESUME_REG_STATE_INIT && 326 + atr_reg_val == ATR_SRC_ADDR_INVALID)) { 327 + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); 328 + if (ret) 329 + return ret; 330 + 331 + ret = t7xx_pcie_reinit(t7xx_dev, true); 332 + if (ret) 333 + return ret; 334 + 335 + t7xx_clear_rgu_irq(t7xx_dev); 336 + return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START); 337 + } 338 + 339 + if (prev_state == PM_RESUME_REG_STATE_EXP || 340 + prev_state == PM_RESUME_REG_STATE_L2_EXP) { 341 + if (prev_state == PM_RESUME_REG_STATE_L2_EXP) { 342 + ret = t7xx_pcie_reinit(t7xx_dev, false); 343 + if (ret) 344 + return ret; 345 + } 346 + 347 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); 348 + t7xx_dev->rgu_pci_irq_en = true; 349 + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); 350 + 351 + t7xx_mhccif_mask_clr(t7xx_dev, 352 + D2H_INT_EXCEPTION_INIT | 353 + D2H_INT_EXCEPTION_INIT_DONE | 354 + D2H_INT_EXCEPTION_CLEARQ_DONE | 355 + D2H_INT_EXCEPTION_ALLQ_RESET | 356 + D2H_INT_PORT_ENUM); 357 + 358 + return ret; 359 + } 360 + 361 + if (prev_state == PM_RESUME_REG_STATE_L2) { 362 + ret = t7xx_pcie_reinit(t7xx_dev, false); 363 + if (ret) 364 + return ret; 365 + 366 + } else if (prev_state != PM_RESUME_REG_STATE_L1 && 367 + prev_state != PM_RESUME_REG_STATE_INIT) { 368 + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); 369 + if (ret) 370 + return ret; 371 + 372 + t7xx_clear_rgu_irq(t7xx_dev); 373 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); 374 + return 0; 375 + } 376 + } 377 + 378 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); 379 + t7xx_wait_pm_config(t7xx_dev); 380 + 381 + 
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { 382 + if (entity->resume_early) 383 + entity->resume_early(t7xx_dev, entity->entity_param); 384 + } 385 + 386 + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); 387 + if (ret) 388 + dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret); 389 + 390 + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP); 391 + if (ret) 392 + dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret); 393 + 394 + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { 395 + if (entity->resume) { 396 + ret = entity->resume(t7xx_dev, entity->entity_param); 397 + if (ret) 398 + dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n", 399 + entity->id, ret); 400 + } 401 + } 402 + 403 + t7xx_dev->rgu_pci_irq_en = true; 404 + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); 405 + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); 406 + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); 407 + 408 + return ret; 409 + } 410 + 411 + static int t7xx_pci_pm_resume_noirq(struct device *dev) 412 + { 413 + struct pci_dev *pdev = to_pci_dev(dev); 414 + struct t7xx_pci_dev *t7xx_dev; 415 + 416 + t7xx_dev = pci_get_drvdata(pdev); 417 + t7xx_pcie_mac_interrupts_dis(t7xx_dev); 418 + 419 + return 0; 420 + } 421 + 422 + static void t7xx_pci_shutdown(struct pci_dev *pdev) 423 + { 424 + __t7xx_pci_pm_suspend(pdev); 425 + } 426 + 427 + static int t7xx_pci_pm_suspend(struct device *dev) 428 + { 429 + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); 430 + } 431 + 432 + static int t7xx_pci_pm_resume(struct device *dev) 433 + { 434 + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); 435 + } 436 + 437 + static int t7xx_pci_pm_thaw(struct device *dev) 438 + { 439 + return __t7xx_pci_pm_resume(to_pci_dev(dev), false); 440 + } 441 + 442 + static const struct dev_pm_ops t7xx_pci_pm_ops = { 443 + .suspend = t7xx_pci_pm_suspend, 444 + .resume = t7xx_pci_pm_resume, 445 + .resume_noirq = t7xx_pci_pm_resume_noirq, 446 + 
.freeze = t7xx_pci_pm_suspend, 447 + .thaw = t7xx_pci_pm_thaw, 448 + .poweroff = t7xx_pci_pm_suspend, 449 + .restore = t7xx_pci_pm_resume, 450 + .restore_noirq = t7xx_pci_pm_resume_noirq, 451 + }; 37 452 38 453 static int t7xx_request_irq(struct pci_dev *pdev) 39 454 { ··· 580 165 IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; 581 166 t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; 582 167 168 + ret = t7xx_pci_pm_init(t7xx_dev); 169 + if (ret) 170 + return ret; 171 + 583 172 t7xx_pcie_mac_atr_init(t7xx_dev); 584 173 t7xx_pci_infracfg_ao_calc(t7xx_dev); 585 174 t7xx_mhccif_init(t7xx_dev); ··· 635 216 .id_table = t7xx_pci_table, 636 217 .probe = t7xx_pci_probe, 637 218 .remove = t7xx_pci_remove, 219 + .driver.pm = &t7xx_pci_pm_ops, 220 + .shutdown = t7xx_pci_shutdown, 638 221 }; 639 222 640 223 module_pci_driver(t7xx_pci_driver);
+46
drivers/net/wwan/t7xx/t7xx_pci.h
··· 17 17 #ifndef __T7XX_PCI_H__ 18 18 #define __T7XX_PCI_H__ 19 19 20 + #include <linux/completion.h> 20 21 #include <linux/irqreturn.h> 22 + #include <linux/mutex.h> 21 23 #include <linux/pci.h> 22 24 #include <linux/types.h> 23 25 ··· 51 49 * @md: modem interface 52 50 * @ccmni_ctlb: context structure used to control the network data path 53 51 * @rgu_pci_irq_en: RGU callback ISR registered and active 52 + * @md_pm_entities: list of pm entities 53 + * @md_pm_entity_mtx: protects md_pm_entities list 54 + * @pm_sr_ack: ack from the device when went to sleep or woke up 55 + * @md_pm_state: state for resume/suspend 54 56 */ 55 57 struct t7xx_pci_dev { 56 58 t7xx_intr_callback intr_handler[EXT_INT_NUM]; ··· 65 59 struct t7xx_modem *md; 66 60 struct t7xx_ccmni_ctrl *ccmni_ctlb; 67 61 bool rgu_pci_irq_en; 62 + 63 + /* Low Power Items */ 64 + struct list_head md_pm_entities; 65 + struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ 66 + struct completion pm_sr_ack; 67 + atomic_t md_pm_state; 68 68 }; 69 + 70 + enum t7xx_pm_id { 71 + PM_ENTITY_ID_CTRL1, 72 + PM_ENTITY_ID_CTRL2, 73 + PM_ENTITY_ID_DATA, 74 + PM_ENTITY_ID_INVALID 75 + }; 76 + 77 + /* struct md_pm_entity - device power management entity 78 + * @entity: list of PM Entities 79 + * @suspend: callback invoked before sending D3 request to device 80 + * @suspend_late: callback invoked after getting D3 ACK from device 81 + * @resume_early: callback invoked before sending the resume request to device 82 + * @resume: callback invoked after getting resume ACK from device 83 + * @id: unique PM entity identifier 84 + * @entity_param: parameter passed to the registered callbacks 85 + * 86 + * This structure is used to indicate PM operations required by internal 87 + * HW modules such as CLDMA and DPMA. 
88 + */ 89 + struct md_pm_entity { 90 + struct list_head entity; 91 + int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); 92 + void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); 93 + void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); 94 + int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); 95 + enum t7xx_pm_id id; 96 + void *entity_param; 97 + }; 98 + 99 + int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); 100 + int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); 101 + void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); 102 + void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev); 69 103 70 104 #endif /* __T7XX_PCI_H__ */
+2
drivers/net/wwan/t7xx/t7xx_state_monitor.c
··· 188 188 case EXCEPTION_EVENT: 189 189 dev_err(dev, "Exception event\n"); 190 190 t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); 191 + t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev); 191 192 t7xx_md_exception_handshake(ctl->md); 192 193 193 194 fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, ··· 301 300 return -ETIMEDOUT; 302 301 } 303 302 303 + t7xx_pci_pm_init_late(md->t7xx_dev); 304 304 fsm_routine_ready(ctl); 305 305 return 0; 306 306 }