Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: wwan: t7xx: Device deep sleep lock/unlock

Introduce the mechanism to lock/unlock the device's 'deep sleep' mode.
When the PCIe link state is L1.2 or L2, the host can still keep the
device in the D0 state from the host's point of view. At the same
time, if the device's 'deep sleep' mode is unlocked, the device will
go to 'deep sleep' while it is still in the D0 state on the host side.

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Haijun Liu; committed by David S. Miller.
de49ea38 d10b3a69

+158 -15
+12
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
··· 934 934 if (ret < 0 && ret != -EACCES) 935 935 return ret; 936 936 937 + t7xx_pci_disable_sleep(md_ctrl->t7xx_dev); 937 938 queue = &md_ctrl->txq[qno]; 938 939 939 940 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); ··· 956 955 queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); 957 956 spin_unlock_irqrestore(&queue->ring_lock, flags); 958 957 958 + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { 959 + ret = -ETIMEDOUT; 960 + break; 961 + } 962 + 959 963 /* Protect the access to the modem for queues operations (resume/start) 960 964 * which access shared locations by all the queues. 961 965 * cldma_lock is independent of ring_lock which is per queue. ··· 973 967 } 974 968 spin_unlock_irqrestore(&queue->ring_lock, flags); 975 969 970 + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { 971 + ret = -ETIMEDOUT; 972 + break; 973 + } 974 + 976 975 if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { 977 976 spin_lock_irqsave(&md_ctrl->cldma_lock, flags); 978 977 t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); ··· 988 977 } while (!ret); 989 978 990 979 allow_sleep: 980 + t7xx_pci_enable_sleep(md_ctrl->t7xx_dev); 991 981 pm_runtime_mark_last_busy(md_ctrl->dev); 992 982 pm_runtime_put_autosuspend(md_ctrl->dev); 993 983 return ret;
+11 -3
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
··· 927 927 if (ret < 0 && ret != -EACCES) 928 928 return; 929 929 930 - t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); 930 + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); 931 + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) 932 + t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); 931 933 934 + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); 932 935 pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 933 936 pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 934 937 atomic_set(&rxq->rx_processing, 0); ··· 1141 1138 if (ret < 0 && ret != -EACCES) 1142 1139 return; 1143 1140 1141 + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); 1142 + 1144 1143 /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ 1145 1144 rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; 1146 - t7xx_dpmaif_bat_release_and_add(rxq); 1147 - t7xx_dpmaif_frag_bat_release_and_add(rxq); 1145 + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { 1146 + t7xx_dpmaif_bat_release_and_add(rxq); 1147 + t7xx_dpmaif_frag_bat_release_and_add(rxq); 1148 + } 1148 1149 1150 + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); 1149 1151 pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 1150 1152 pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 1151 1153 }
+29 -12
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
··· 166 166 if (ret < 0 && ret != -EACCES) 167 167 return; 168 168 169 - hw_info = &dpmaif_ctrl->hw_info; 170 - ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); 171 - if (ret == -EAGAIN || 172 - (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && 173 - t7xx_dpmaif_drb_ring_not_empty(txq))) { 174 - queue_work(dpmaif_ctrl->txq[txq->index].worker, 175 - &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); 176 - /* Give the device time to enter the low power state */ 177 - t7xx_dpmaif_clr_ip_busy_sts(hw_info); 178 - } else { 179 - t7xx_dpmaif_clr_ip_busy_sts(hw_info); 180 - t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); 169 + /* The device may be in low power state. Disable sleep if needed */ 170 + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); 171 + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { 172 + hw_info = &dpmaif_ctrl->hw_info; 173 + ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); 174 + if (ret == -EAGAIN || 175 + (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && 176 + t7xx_dpmaif_drb_ring_not_empty(txq))) { 177 + queue_work(dpmaif_ctrl->txq[txq->index].worker, 178 + &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); 179 + /* Give the device time to enter the low power state */ 180 + t7xx_dpmaif_clr_ip_busy_sts(hw_info); 181 + } else { 182 + t7xx_dpmaif_clr_ip_busy_sts(hw_info); 183 + t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); 184 + } 181 185 } 182 186 187 + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); 183 188 pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 184 189 pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 185 190 } ··· 410 405 411 406 static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) 412 407 { 408 + bool wait_disable_sleep = true; 409 + 413 410 do { 414 411 struct dpmaif_tx_queue *txq; 415 412 int drb_send_cnt; ··· 425 418 usleep_range(10, 20); 426 419 cond_resched(); 427 420 continue; 421 + } 422 + 423 + /* Wait for the PCIe resource to unlock */ 424 + if (wait_disable_sleep) { 425 + 
if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) 426 + return; 427 + 428 + wait_disable_sleep = false; 428 429 } 429 430 430 431 t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index, ··· 465 450 if (ret < 0 && ret != -EACCES) 466 451 return ret; 467 452 453 + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); 468 454 t7xx_do_tx_hw_push(dpmaif_ctrl); 455 + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); 469 456 pm_runtime_mark_last_busy(dpmaif_ctrl->dev); 470 457 pm_runtime_put_autosuspend(dpmaif_ctrl->dev); 471 458 }
+3
drivers/net/wwan/t7xx/t7xx_mhccif.c
··· 59 59 60 60 t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); 61 61 62 + if (int_status & D2H_INT_DS_LOCK_ACK) 63 + complete_all(&t7xx_dev->sleep_lock_acquire); 64 + 62 65 if (int_status & D2H_INT_SR_ACK) 63 66 complete(&t7xx_dev->pm_sr_ack); 64 67
+93
drivers/net/wwan/t7xx/t7xx_pci.c
··· 33 33 #include <linux/pm.h> 34 34 #include <linux/pm_runtime.h> 35 35 #include <linux/pm_wakeup.h> 36 + #include <linux/spinlock.h> 36 37 37 38 #include "t7xx_mhccif.h" 38 39 #include "t7xx_modem_ops.h" ··· 45 44 #define T7XX_PCI_IREG_BASE 0 46 45 #define T7XX_PCI_EREG_BASE 2 47 46 47 + #define PM_SLEEP_DIS_TIMEOUT_MS 20 48 48 #define PM_ACK_TIMEOUT_MS 1500 49 49 #define PM_AUTOSUSPEND_MS 20000 50 50 #define PM_RESOURCE_POLL_TIMEOUT_US 10000 ··· 57 55 MTK_PM_SUSPENDED, 58 56 MTK_PM_RESUMED, 59 57 }; 58 + 59 + static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable) 60 + { 61 + void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL; 62 + u32 value; 63 + 64 + value = ioread32(ctrl_reg); 65 + 66 + if (enable) 67 + value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS; 68 + else 69 + value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS; 70 + 71 + iowrite32(value, ctrl_reg); 72 + } 60 73 61 74 static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) 62 75 { ··· 93 76 94 77 INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); 95 78 mutex_init(&t7xx_dev->md_pm_entity_mtx); 79 + spin_lock_init(&t7xx_dev->md_pm_lock); 80 + init_completion(&t7xx_dev->sleep_lock_acquire); 96 81 init_completion(&t7xx_dev->pm_sr_ack); 97 82 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); 98 83 ··· 113 94 { 114 95 /* Enable the PCIe resource lock only after MD deep sleep is done */ 115 96 t7xx_mhccif_mask_clr(t7xx_dev, 97 + D2H_INT_DS_LOCK_ACK | 116 98 D2H_INT_SUSPEND_ACK | 117 99 D2H_INT_RESUME_ACK | 118 100 D2H_INT_SUSPEND_ACK_AP | ··· 177 157 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); 178 158 179 159 return -ENXIO; 160 + } 161 + 162 + int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev) 163 + { 164 + struct device *dev = &t7xx_dev->pdev->dev; 165 + int ret; 166 + 167 + ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, 168 + msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS)); 169 + if (!ret) 170 + dev_err_ratelimited(dev, "Resource wait complete timed out\n"); 
171 + 172 + return ret; 173 + } 174 + 175 + /** 176 + * t7xx_pci_disable_sleep() - Disable deep sleep capability. 177 + * @t7xx_dev: MTK device. 178 + * 179 + * Lock the deep sleep capability, note that the device can still go into deep sleep 180 + * state while device is in D0 state, from the host's point-of-view. 181 + * 182 + * If device is in deep sleep state, wake up the device and disable deep sleep capability. 183 + */ 184 + void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev) 185 + { 186 + unsigned long flags; 187 + 188 + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); 189 + t7xx_dev->sleep_disable_count++; 190 + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) 191 + goto unlock_and_complete; 192 + 193 + if (t7xx_dev->sleep_disable_count == 1) { 194 + u32 status; 195 + 196 + reinit_completion(&t7xx_dev->sleep_lock_acquire); 197 + t7xx_dev_set_sleep_capability(t7xx_dev, false); 198 + 199 + status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); 200 + if (status & T7XX_PCIE_RESOURCE_STS_MSK) 201 + goto unlock_and_complete; 202 + 203 + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK); 204 + } 205 + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); 206 + return; 207 + 208 + unlock_and_complete: 209 + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); 210 + complete_all(&t7xx_dev->sleep_lock_acquire); 211 + } 212 + 213 + /** 214 + * t7xx_pci_enable_sleep() - Enable deep sleep capability. 215 + * @t7xx_dev: MTK device. 216 + * 217 + * After enabling deep sleep, device can enter into deep sleep state. 
218 + */ 219 + void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev) 220 + { 221 + unsigned long flags; 222 + 223 + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); 224 + t7xx_dev->sleep_disable_count--; 225 + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) 226 + goto unlock; 227 + 228 + if (t7xx_dev->sleep_disable_count == 0) 229 + t7xx_dev_set_sleep_capability(t7xx_dev, true); 230 + 231 + unlock: 232 + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); 180 233 } 181 234 182 235 static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
+10
drivers/net/wwan/t7xx/t7xx_pci.h
··· 21 21 #include <linux/irqreturn.h> 22 22 #include <linux/mutex.h> 23 23 #include <linux/pci.h> 24 + #include <linux/spinlock.h> 24 25 #include <linux/types.h> 25 26 26 27 #include "t7xx_reg.h" ··· 56 55 * @md_pm_entity_mtx: protects md_pm_entities list 57 56 * @pm_sr_ack: ack from the device when went to sleep or woke up 58 57 * @md_pm_state: state for resume/suspend 58 + * @md_pm_lock: protects PCIe sleep lock 59 + * @sleep_disable_count: PCIe L1.2 lock counter 60 + * @sleep_lock_acquire: indicates that sleep has been disabled 59 61 */ 60 62 struct t7xx_pci_dev { 61 63 t7xx_intr_callback intr_handler[EXT_INT_NUM]; ··· 75 71 struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ 76 72 struct completion pm_sr_ack; 77 73 atomic_t md_pm_state; 74 + spinlock_t md_pm_lock; /* Protects PCI resource lock */ 75 + unsigned int sleep_disable_count; 76 + struct completion sleep_lock_acquire; 78 77 }; 79 78 80 79 enum t7xx_pm_id { ··· 109 102 void *entity_param; 110 103 }; 111 104 105 + void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev); 106 + void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev); 107 + int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev); 112 108 int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); 113 109 int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); 114 110 void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);