Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: wwan: t7xx: Add core components

Registers the t7xx device driver with the kernel. Sets up all the core
components: PCIe layer, Modem Host Cross Core Interface (MHCCIF),
modem control operations, modem state machine, and build
infrastructure.

* PCIe layer code implements driver probe and removal.
* MHCCIF provides interrupt channels to communicate events
such as handshake, PM and port enumeration.
* Modem control implements the entry point for modem init,
reset and exit.
* The modem status monitor is a state machine used by modem control
to complete initialization and stop. It is used also to propagate
exception events reported by other components.

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Haijun Liu and committed by
David S. Miller
13e920d9 39d43904

+2108
+14
drivers/net/wwan/Kconfig
··· 105 105 106 106 If unsure, say N. 107 107 108 + config MTK_T7XX 109 + tristate "MediaTek PCIe 5G WWAN modem T7xx device" 110 + depends on PCI 111 + help 112 + Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device. 113 + Adapts WWAN framework and provides network interface like wwan0 114 + and tty interfaces like wwan0at0 (AT protocol), wwan0mbim0 115 + (MBIM protocol), etc. 116 + 117 + To compile this driver as a module, choose M here: the module will be 118 + called mtk_t7xx. 119 + 120 + If unsure, say N. 121 + 108 122 endif # WWAN 109 123 110 124 endmenu
+1
drivers/net/wwan/Makefile
··· 13 13 obj-$(CONFIG_QCOM_BAM_DMUX) += qcom_bam_dmux.o 14 14 obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o 15 15 obj-$(CONFIG_IOSM) += iosm/ 16 + obj-$(CONFIG_MTK_T7XX) += t7xx/
+12
drivers/net/wwan/t7xx/Makefile
# SPDX-License-Identifier: GPL-2.0-only

ccflags-y += -Werror

obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
# Object list for the mtk_t7xx module. Note: no trailing line
# continuation after the last entry (a dangling backslash at EOF
# silently continues into nothing and trips some make linters).
mtk_t7xx-y := t7xx_pci.o \
	      t7xx_pcie_mac.o \
	      t7xx_mhccif.o \
	      t7xx_state_monitor.o \
	      t7xx_modem_ops.o \
	      t7xx_cldma.o \
	      t7xx_hif_cldma.o
+102
drivers/net/wwan/t7xx/t7xx_mhccif.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 */

#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/dev_printk.h>
#include <linux/io.h>
#include <linux/irqreturn.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"

/* Acknowledge the given MHCCIF interrupt bits at both interrupt levels.
 * Order matters: the level-2 (MHCCIF) status must be cleared and the write
 * flushed (via a read-back) before the level-1 (PCIe MAC) status is cleared,
 * otherwise the still-pending level-2 source would re-latch level 1.
 */
static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask)
{
	void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;

	/* Clear level 2 interrupt */
	iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK);
	/* Ensure write is complete */
	t7xx_mhccif_read_sw_int_sts(t7xx_dev);
	/* Clear level 1 interrupt */
	t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT);
}

/* Threaded half of the MHCCIF ISR: dispatch device-to-host software
 * interrupts to the modem layer, then ack and re-enable the interrupt.
 */
static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;
	u32 int_status, val;

	/* NOTE(review): register name suggests this blocks ASPM/low-power
	 * entry while the interrupt is serviced — confirm against HW spec.
	 */
	val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
	iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);

	int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
	if (int_status & D2H_SW_INT_MASK) {
		int ret = t7xx_pci_mhccif_isr(t7xx_dev);

		if (ret)
			dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d", ret);
	}

	t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
	/* Hard half disabled MHCCIF_INT via IRQ_WAKE_THREAD path; re-arm it */
	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
	return IRQ_HANDLED;
}

/* Read the raw EP-to-RC software interrupt status register. */
u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev)
{
	return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS);
}

/* Mask (disable) the EP-to-RC software interrupts selected by @val. */
void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val)
{
	iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET);
}

/* Unmask (enable) the EP-to-RC software interrupts selected by @val. */
void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val)
{
	iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR);
}

/* Return the current EP-to-RC software interrupt mask. */
u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev)
{
	return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK);
}

/* Hard-IRQ half: all work is deferred to t7xx_mhccif_isr_thread(). */
static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/* Compute the host-visible MHCCIF base address and register the MHCCIF
 * interrupt handler/thread pair with the PCIe MAC dispatch tables.
 */
void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base +
					     MHCCIF_RC_DEV_BASE -
					     t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;

	t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler;
	t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread;
	t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev;
}

/* Trigger a host-to-device software interrupt on @channel: set the busy
 * bit for the channel, then write the channel number to fire it.
 */
void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel)
{
	void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;

	iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY);
	iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM);
}
+37
drivers/net/wwan/t7xx/t7xx_mhccif.h
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 */

#ifndef __T7XX_MHCCIF_H__
#define __T7XX_MHCCIF_H__

#include <linux/types.h>

#include "t7xx_pci.h"
#include "t7xx_reg.h"

/* Set of device-to-host software interrupts the MHCCIF ISR dispatches:
 * the four exception-handshake stages, port enumeration, and the async
 * modem-handshake notification.
 */
#define D2H_SW_INT_MASK (D2H_INT_EXCEPTION_INIT |	\
			 D2H_INT_EXCEPTION_INIT_DONE |	\
			 D2H_INT_EXCEPTION_CLEARQ_DONE |	\
			 D2H_INT_EXCEPTION_ALLQ_RESET |	\
			 D2H_INT_PORT_ENUM |		\
			 D2H_INT_ASYNC_MD_HK)

void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val);
u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev);
void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev);
u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev);
void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel);

#endif /*__T7XX_MHCCIF_H__ */
+498
drivers/net/wwan/t7xx/t7xx_modem_ops.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Haijun Liu <haijun.liu@mediatek.com> 8 + * Eliot Lee <eliot.lee@intel.com> 9 + * Moises Veleta <moises.veleta@intel.com> 10 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 11 + * 12 + * Contributors: 13 + * Amir Hanania <amir.hanania@intel.com> 14 + * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com> 15 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 16 + */ 17 + 18 + #include <linux/acpi.h> 19 + #include <linux/device.h> 20 + #include <linux/delay.h> 21 + #include <linux/gfp.h> 22 + #include <linux/io.h> 23 + #include <linux/irqreturn.h> 24 + #include <linux/kthread.h> 25 + #include <linux/skbuff.h> 26 + #include <linux/spinlock.h> 27 + #include <linux/string.h> 28 + #include <linux/types.h> 29 + #include <linux/workqueue.h> 30 + 31 + #include "t7xx_cldma.h" 32 + #include "t7xx_hif_cldma.h" 33 + #include "t7xx_mhccif.h" 34 + #include "t7xx_modem_ops.h" 35 + #include "t7xx_pci.h" 36 + #include "t7xx_pcie_mac.h" 37 + #include "t7xx_reg.h" 38 + #include "t7xx_state_monitor.h" 39 + 40 + #define RGU_RESET_DELAY_MS 10 41 + #define PORT_RESET_DELAY_MS 2000 42 + #define EX_HS_TIMEOUT_MS 5000 43 + #define EX_HS_POLL_DELAY_MS 10 44 + 45 + static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev) 46 + { 47 + return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK; 48 + } 49 + 50 + /** 51 + * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts. 52 + * @t7xx_dev: MTK device. 53 + * 54 + * Check the interrupt status and queue commands accordingly. 55 + * 56 + * Returns: 57 + ** 0 - Success. 58 + ** -EINVAL - Failure to get FSM control. 
59 + */ 60 + int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev) 61 + { 62 + struct t7xx_modem *md = t7xx_dev->md; 63 + struct t7xx_fsm_ctl *ctl; 64 + unsigned int int_sta; 65 + int ret = 0; 66 + u32 mask; 67 + 68 + ctl = md->fsm_ctl; 69 + if (!ctl) { 70 + dev_err_ratelimited(&t7xx_dev->pdev->dev, 71 + "MHCCIF interrupt received before initializing MD monitor\n"); 72 + return -EINVAL; 73 + } 74 + 75 + spin_lock_bh(&md->exp_lock); 76 + int_sta = t7xx_get_interrupt_status(t7xx_dev); 77 + md->exp_id |= int_sta; 78 + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { 79 + if (ctl->md_state == MD_STATE_INVALID || 80 + ctl->md_state == MD_STATE_WAITING_FOR_HS1 || 81 + ctl->md_state == MD_STATE_WAITING_FOR_HS2 || 82 + ctl->md_state == MD_STATE_READY) { 83 + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; 84 + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX); 85 + } 86 + } else if (md->exp_id & D2H_INT_PORT_ENUM) { 87 + md->exp_id &= ~D2H_INT_PORT_ENUM; 88 + 89 + if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START || 90 + ctl->curr_state == FSM_STATE_STOPPED) 91 + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM); 92 + } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) { 93 + mask = t7xx_mhccif_mask_get(t7xx_dev); 94 + if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) { 95 + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 96 + queue_work(md->handshake_wq, &md->handshake_work); 97 + } 98 + } 99 + spin_unlock_bh(&md->exp_lock); 100 + 101 + return ret; 102 + } 103 + 104 + static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev) 105 + { 106 + struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr; 107 + void __iomem *reset_pcie_reg; 108 + u32 val; 109 + 110 + reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA - 111 + pbase_addr->pcie_dev_reg_trsl_addr; 112 + val = ioread32(reset_pcie_reg); 113 + iowrite32(val, reset_pcie_reg); 114 + } 115 + 116 + void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev) 117 + { 
118 + /* Clear L2 */ 119 + t7xx_clr_device_irq_via_pcie(t7xx_dev); 120 + /* Clear L1 */ 121 + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); 122 + } 123 + 124 + static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name) 125 + { 126 + #ifdef CONFIG_ACPI 127 + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 128 + struct device *dev = &t7xx_dev->pdev->dev; 129 + acpi_status acpi_ret; 130 + acpi_handle handle; 131 + 132 + handle = ACPI_HANDLE(dev); 133 + if (!handle) { 134 + dev_err(dev, "ACPI handle not found\n"); 135 + return -EFAULT; 136 + } 137 + 138 + if (!acpi_has_method(handle, fn_name)) { 139 + dev_err(dev, "%s method not found\n", fn_name); 140 + return -EFAULT; 141 + } 142 + 143 + acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer); 144 + if (ACPI_FAILURE(acpi_ret)) { 145 + dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret)); 146 + return -EFAULT; 147 + } 148 + 149 + #endif 150 + return 0; 151 + } 152 + 153 + int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev) 154 + { 155 + return t7xx_acpi_reset(t7xx_dev, "_RST"); 156 + } 157 + 158 + static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) 159 + { 160 + u32 val; 161 + 162 + val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); 163 + if (val & MISC_RESET_TYPE_PLDR) 164 + t7xx_acpi_reset(t7xx_dev, "MRST._RST"); 165 + else if (val & MISC_RESET_TYPE_FLDR) 166 + t7xx_acpi_fldr_func(t7xx_dev); 167 + } 168 + 169 + static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data) 170 + { 171 + struct t7xx_pci_dev *t7xx_dev = data; 172 + 173 + msleep(RGU_RESET_DELAY_MS); 174 + t7xx_reset_device_via_pmic(t7xx_dev); 175 + return IRQ_HANDLED; 176 + } 177 + 178 + static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data) 179 + { 180 + struct t7xx_pci_dev *t7xx_dev = data; 181 + struct t7xx_modem *modem; 182 + 183 + t7xx_clear_rgu_irq(t7xx_dev); 184 + if (!t7xx_dev->rgu_pci_irq_en) 185 + return IRQ_HANDLED; 186 + 187 + modem = 
t7xx_dev->md; 188 + modem->rgu_irq_asserted = true; 189 + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); 190 + return IRQ_WAKE_THREAD; 191 + } 192 + 193 + static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev) 194 + { 195 + /* Registers RGU callback ISR with PCIe driver */ 196 + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); 197 + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); 198 + 199 + t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler; 200 + t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread; 201 + t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev; 202 + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); 203 + } 204 + 205 + /** 206 + * t7xx_cldma_exception() - CLDMA exception handler. 207 + * @md_ctrl: modem control struct. 208 + * @stage: exception stage. 209 + * 210 + * Part of the modem exception recovery. 211 + * Stages are one after the other as describe below: 212 + * HIF_EX_INIT: Disable and clear TXQ. 213 + * HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. 214 + * HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. 
215 + */ 216 + 217 + /* Modem Exception Handshake Flow 218 + * 219 + * Modem HW Exception interrupt received 220 + * (MD_IRQ_CCIF_EX) 221 + * | 222 + * +---------v--------+ 223 + * | HIF_EX_INIT | : Disable and clear TXQ 224 + * +------------------+ 225 + * | 226 + * +---------v--------+ 227 + * | HIF_EX_INIT_DONE | : Wait for the init to be done 228 + * +------------------+ 229 + * | 230 + * +---------v--------+ 231 + * |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ 232 + * +------------------+ : Flush TX/RX workqueues 233 + * | 234 + * +---------v--------+ 235 + * |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA 236 + * +------------------+ 237 + */ 238 + static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage) 239 + { 240 + switch (stage) { 241 + case HIF_EX_INIT: 242 + t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX); 243 + t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX); 244 + break; 245 + 246 + case HIF_EX_CLEARQ_DONE: 247 + /* We do not want to get CLDMA IRQ when MD is 248 + * resetting CLDMA after it got clearq_ack. 
249 + */ 250 + t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX); 251 + t7xx_cldma_stop(md_ctrl); 252 + 253 + if (md_ctrl->hif_id == CLDMA_ID_MD) 254 + t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base); 255 + 256 + t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX); 257 + break; 258 + 259 + case HIF_EX_ALLQ_RESET: 260 + t7xx_cldma_hw_init(&md_ctrl->hw_info); 261 + t7xx_cldma_start(md_ctrl); 262 + break; 263 + 264 + default: 265 + break; 266 + } 267 + } 268 + 269 + static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) 270 + { 271 + struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev; 272 + 273 + if (stage == HIF_EX_CLEARQ_DONE) { 274 + /* Give DHL time to flush data */ 275 + msleep(PORT_RESET_DELAY_MS); 276 + } 277 + 278 + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); 279 + 280 + if (stage == HIF_EX_INIT) 281 + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); 282 + else if (stage == HIF_EX_CLEARQ_DONE) 283 + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK); 284 + } 285 + 286 + static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id) 287 + { 288 + unsigned int waited_time_ms = 0; 289 + 290 + do { 291 + if (md->exp_id & event_id) 292 + return 0; 293 + 294 + waited_time_ms += EX_HS_POLL_DELAY_MS; 295 + msleep(EX_HS_POLL_DELAY_MS); 296 + } while (waited_time_ms < EX_HS_TIMEOUT_MS); 297 + 298 + return -EFAULT; 299 + } 300 + 301 + static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev) 302 + { 303 + /* Register the MHCCIF ISR for MD exception, port enum and 304 + * async handshake notifications. 
305 + */ 306 + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); 307 + t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM); 308 + 309 + /* Register RGU IRQ handler for sAP exception notification */ 310 + t7xx_dev->rgu_pci_irq_en = true; 311 + t7xx_pcie_register_rgu_isr(t7xx_dev); 312 + } 313 + 314 + static void t7xx_md_hk_wq(struct work_struct *work) 315 + { 316 + struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); 317 + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 318 + 319 + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); 320 + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); 321 + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); 322 + md->core_md.ready = true; 323 + wake_up(&ctl->async_hk_wq); 324 + } 325 + 326 + void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) 327 + { 328 + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 329 + void __iomem *mhccif_base; 330 + unsigned int int_sta; 331 + unsigned long flags; 332 + 333 + switch (evt_id) { 334 + case FSM_PRE_START: 335 + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); 336 + break; 337 + 338 + case FSM_START: 339 + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM); 340 + 341 + spin_lock_irqsave(&md->exp_lock, flags); 342 + int_sta = t7xx_get_interrupt_status(md->t7xx_dev); 343 + md->exp_id |= int_sta; 344 + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { 345 + ctl->exp_flg = true; 346 + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; 347 + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 348 + } else if (ctl->exp_flg) { 349 + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 350 + } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { 351 + queue_work(md->handshake_wq, &md->handshake_work); 352 + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 353 + mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; 354 + iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); 355 + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); 356 + } else { 357 + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); 358 + } 359 
+ spin_unlock_irqrestore(&md->exp_lock, flags); 360 + 361 + t7xx_mhccif_mask_clr(md->t7xx_dev, 362 + D2H_INT_EXCEPTION_INIT | 363 + D2H_INT_EXCEPTION_INIT_DONE | 364 + D2H_INT_EXCEPTION_CLEARQ_DONE | 365 + D2H_INT_EXCEPTION_ALLQ_RESET); 366 + break; 367 + 368 + case FSM_READY: 369 + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); 370 + break; 371 + 372 + default: 373 + break; 374 + } 375 + } 376 + 377 + void t7xx_md_exception_handshake(struct t7xx_modem *md) 378 + { 379 + struct device *dev = &md->t7xx_dev->pdev->dev; 380 + int ret; 381 + 382 + t7xx_md_exception(md, HIF_EX_INIT); 383 + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE); 384 + if (ret) 385 + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE); 386 + 387 + t7xx_md_exception(md, HIF_EX_INIT_DONE); 388 + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE); 389 + if (ret) 390 + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE); 391 + 392 + t7xx_md_exception(md, HIF_EX_CLEARQ_DONE); 393 + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET); 394 + if (ret) 395 + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET); 396 + 397 + t7xx_md_exception(md, HIF_EX_ALLQ_RESET); 398 + } 399 + 400 + static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev) 401 + { 402 + struct device *dev = &t7xx_dev->pdev->dev; 403 + struct t7xx_modem *md; 404 + 405 + md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL); 406 + if (!md) 407 + return NULL; 408 + 409 + md->t7xx_dev = t7xx_dev; 410 + t7xx_dev->md = md; 411 + spin_lock_init(&md->exp_lock); 412 + md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 413 + 0, "md_hk_wq"); 414 + if (!md->handshake_wq) 415 + return NULL; 416 + 417 + INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); 418 + return md; 419 + } 420 + 421 + int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) 422 + { 423 + struct t7xx_modem *md = 
t7xx_dev->md; 424 + 425 + md->md_init_finish = false; 426 + md->exp_id = 0; 427 + t7xx_fsm_reset(md); 428 + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); 429 + md->md_init_finish = true; 430 + return 0; 431 + } 432 + 433 + /** 434 + * t7xx_md_init() - Initialize modem. 435 + * @t7xx_dev: MTK device. 436 + * 437 + * Allocate and initialize MD control block, and initialize data path. 438 + * Register MHCCIF ISR and RGU ISR, and start the state machine. 439 + * 440 + * Return: 441 + ** 0 - Success. 442 + ** -ENOMEM - Allocation failure. 443 + */ 444 + int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) 445 + { 446 + struct t7xx_modem *md; 447 + int ret; 448 + 449 + md = t7xx_md_alloc(t7xx_dev); 450 + if (!md) 451 + return -ENOMEM; 452 + 453 + ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev); 454 + if (ret) 455 + goto err_destroy_hswq; 456 + 457 + ret = t7xx_fsm_init(md); 458 + if (ret) 459 + goto err_destroy_hswq; 460 + 461 + ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); 462 + if (ret) 463 + goto err_uninit_fsm; 464 + 465 + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); 466 + if (ret) /* fsm_uninit flushes cmd queue */ 467 + goto err_uninit_md_cldma; 468 + 469 + t7xx_md_sys_sw_init(t7xx_dev); 470 + md->md_init_finish = true; 471 + return 0; 472 + 473 + err_uninit_md_cldma: 474 + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); 475 + 476 + err_uninit_fsm: 477 + t7xx_fsm_uninit(md); 478 + 479 + err_destroy_hswq: 480 + destroy_workqueue(md->handshake_wq); 481 + dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n"); 482 + return ret; 483 + } 484 + 485 + void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) 486 + { 487 + struct t7xx_modem *md = t7xx_dev->md; 488 + 489 + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); 490 + 491 + if (!md->md_init_finish) 492 + return; 493 + 494 + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); 495 + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); 496 + t7xx_fsm_uninit(md); 497 + destroy_workqueue(md->handshake_wq); 
498 + }
+85
drivers/net/wwan/t7xx/t7xx_modem_ops.h
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#ifndef __T7XX_MODEM_OPS_H__
#define __T7XX_MODEM_OPS_H__

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_pci.h"

#define FEATURE_COUNT		64

/**
 * enum hif_ex_stage -  HIF exception handshake stages with the HW.
 * @HIF_EX_INIT:        Disable and clear TXQ.
 * @HIF_EX_INIT_DONE:   Polling for initialization to be done.
 * @HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX.
 * @HIF_EX_ALLQ_RESET:  HW is back in safe mode for re-initialization and restart.
 */
enum hif_ex_stage {
	HIF_EX_INIT,
	HIF_EX_INIT_DONE,
	HIF_EX_CLEARQ_DONE,
	HIF_EX_ALLQ_RESET,
};

/* On-wire runtime feature entry exchanged during the handshake;
 * data_len counts the bytes in the trailing flexible array.
 */
struct mtk_runtime_feature {
	u8				feature_id;
	u8				support_info;
	u8				reserved[2];
	__le32				data_len;
	__le32				data[];
};

/* FSM transitions reported to t7xx_md_event_notify(). */
enum md_event_id {
	FSM_PRE_START,
	FSM_START,
	FSM_READY,
};

/* Per-core readiness flag set once the handshake work completes. */
struct t7xx_sys_info {
	bool				ready;
};

/* Modem control block: CLDMA controllers, FSM handle, the handshake
 * workqueue/work, and exception state guarded by exp_lock.
 */
struct t7xx_modem {
	struct cldma_ctrl		*md_ctrl[CLDMA_NUM];
	struct t7xx_pci_dev		*t7xx_dev;
	struct t7xx_sys_info		core_md;
	bool				md_init_finish;
	bool				rgu_irq_asserted;
	struct workqueue_struct		*handshake_wq;
	struct work_struct		handshake_work;
	struct t7xx_fsm_ctl		*fsm_ctl;
	struct port_proxy		*port_prox;
	unsigned int			exp_id;
	spinlock_t			exp_lock; /* Protects exception events */
};

void t7xx_md_exception_handshake(struct t7xx_modem *md);
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id);
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev);
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);

#endif	/* __T7XX_MODEM_OPS_H__ */
+225
drivers/net/wwan/t7xx/t7xx_pci.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Haijun Liu <haijun.liu@mediatek.com> 8 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 9 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 10 + * 11 + * Contributors: 12 + * Amir Hanania <amir.hanania@intel.com> 13 + * Andy Shevchenko <andriy.shevchenko@linux.intel.com> 14 + * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com> 15 + * Eliot Lee <eliot.lee@intel.com> 16 + * Moises Veleta <moises.veleta@intel.com> 17 + */ 18 + 19 + #include <linux/atomic.h> 20 + #include <linux/bits.h> 21 + #include <linux/device.h> 22 + #include <linux/dma-mapping.h> 23 + #include <linux/gfp.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/io.h> 26 + #include <linux/module.h> 27 + #include <linux/pci.h> 28 + 29 + #include "t7xx_mhccif.h" 30 + #include "t7xx_modem_ops.h" 31 + #include "t7xx_pci.h" 32 + #include "t7xx_pcie_mac.h" 33 + #include "t7xx_reg.h" 34 + 35 + #define T7XX_PCI_IREG_BASE 0 36 + #define T7XX_PCI_EREG_BASE 2 37 + 38 + static int t7xx_request_irq(struct pci_dev *pdev) 39 + { 40 + struct t7xx_pci_dev *t7xx_dev; 41 + int ret, i; 42 + 43 + t7xx_dev = pci_get_drvdata(pdev); 44 + 45 + for (i = 0; i < EXT_INT_NUM; i++) { 46 + const char *irq_descr; 47 + int irq_vec; 48 + 49 + if (!t7xx_dev->intr_handler[i]) 50 + continue; 51 + 52 + irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", 53 + dev_driver_string(&pdev->dev), i); 54 + if (!irq_descr) { 55 + ret = -ENOMEM; 56 + break; 57 + } 58 + 59 + irq_vec = pci_irq_vector(pdev, i); 60 + ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i], 61 + t7xx_dev->intr_thread[i], 0, irq_descr, 62 + t7xx_dev->callback_param[i]); 63 + if (ret) { 64 + dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret); 65 + break; 66 + } 67 + } 68 + 69 + if (ret) { 70 + while (i--) { 71 + if (!t7xx_dev->intr_handler[i]) 72 + continue; 73 + 74 + 
free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); 75 + } 76 + } 77 + 78 + return ret; 79 + } 80 + 81 + static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev) 82 + { 83 + struct pci_dev *pdev = t7xx_dev->pdev; 84 + int ret; 85 + 86 + /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */ 87 + ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX); 88 + if (ret < 0) { 89 + dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret); 90 + return ret; 91 + } 92 + 93 + ret = t7xx_request_irq(pdev); 94 + if (ret) { 95 + pci_free_irq_vectors(pdev); 96 + return ret; 97 + } 98 + 99 + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); 100 + return 0; 101 + } 102 + 103 + static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev) 104 + { 105 + int ret, i; 106 + 107 + if (!t7xx_dev->pdev->msix_cap) 108 + return -EINVAL; 109 + 110 + ret = t7xx_setup_msix(t7xx_dev); 111 + if (ret) 112 + return ret; 113 + 114 + /* IPs enable interrupts when ready */ 115 + for (i = 0; i < EXT_INT_NUM; i++) 116 + t7xx_pcie_mac_set_int(t7xx_dev, i); 117 + 118 + return 0; 119 + } 120 + 121 + static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev) 122 + { 123 + t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base + 124 + INFRACFG_AO_DEV_CHIP - 125 + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; 126 + } 127 + 128 + static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 129 + { 130 + struct t7xx_pci_dev *t7xx_dev; 131 + int ret; 132 + 133 + t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); 134 + if (!t7xx_dev) 135 + return -ENOMEM; 136 + 137 + pci_set_drvdata(pdev, t7xx_dev); 138 + t7xx_dev->pdev = pdev; 139 + 140 + ret = pcim_enable_device(pdev); 141 + if (ret) 142 + return ret; 143 + 144 + pci_set_master(pdev); 145 + 146 + ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE), 147 + pci_name(pdev)); 148 + if (ret) { 149 + 
dev_err(&pdev->dev, "Could not request BARs: %d\n", ret); 150 + return -ENOMEM; 151 + } 152 + 153 + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 154 + if (ret) { 155 + dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret); 156 + return ret; 157 + } 158 + 159 + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 160 + if (ret) { 161 + dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret); 162 + return ret; 163 + } 164 + 165 + IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; 166 + t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; 167 + 168 + t7xx_pcie_mac_atr_init(t7xx_dev); 169 + t7xx_pci_infracfg_ao_calc(t7xx_dev); 170 + t7xx_mhccif_init(t7xx_dev); 171 + 172 + ret = t7xx_md_init(t7xx_dev); 173 + if (ret) 174 + return ret; 175 + 176 + t7xx_pcie_mac_interrupts_dis(t7xx_dev); 177 + 178 + ret = t7xx_interrupt_init(t7xx_dev); 179 + if (ret) { 180 + t7xx_md_exit(t7xx_dev); 181 + return ret; 182 + } 183 + 184 + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); 185 + t7xx_pcie_mac_interrupts_en(t7xx_dev); 186 + 187 + return 0; 188 + } 189 + 190 + static void t7xx_pci_remove(struct pci_dev *pdev) 191 + { 192 + struct t7xx_pci_dev *t7xx_dev; 193 + int i; 194 + 195 + t7xx_dev = pci_get_drvdata(pdev); 196 + t7xx_md_exit(t7xx_dev); 197 + 198 + for (i = 0; i < EXT_INT_NUM; i++) { 199 + if (!t7xx_dev->intr_handler[i]) 200 + continue; 201 + 202 + free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); 203 + } 204 + 205 + pci_free_irq_vectors(t7xx_dev->pdev); 206 + } 207 + 208 + static const struct pci_device_id t7xx_pci_table[] = { 209 + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) }, 210 + { } 211 + }; 212 + MODULE_DEVICE_TABLE(pci, t7xx_pci_table); 213 + 214 + static struct pci_driver t7xx_pci_driver = { 215 + .name = "mtk_t7xx", 216 + .id_table = t7xx_pci_table, 217 + .probe = t7xx_pci_probe, 218 + .remove = t7xx_pci_remove, 219 + }; 220 + 221 + module_pci_driver(t7xx_pci_driver); 222 + 
223 + MODULE_AUTHOR("MediaTek Inc"); 224 + MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver"); 225 + MODULE_LICENSE("GPL");
+64
drivers/net/wwan/t7xx/t7xx_pci.h
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 */

#ifndef __T7XX_PCI_H__
#define __T7XX_PCI_H__

#include <linux/irqreturn.h>
#include <linux/pci.h>
#include <linux/types.h>

#include "t7xx_reg.h"

/* struct t7xx_addr_base - holds base addresses
 * @pcie_mac_ireg_base: PCIe MAC register base
 * @pcie_ext_reg_base: used to calculate base addresses for CLDMA, DPMA and MHCCIF registers
 * @pcie_dev_reg_trsl_addr: used to calculate the register base address
 * @infracfg_ao_base: base address used in CLDMA reset operations
 * @mhccif_rc_base: host view of MHCCIF rc base addr
 */
struct t7xx_addr_base {
	void __iomem		*pcie_mac_ireg_base;
	void __iomem		*pcie_ext_reg_base;
	u32			pcie_dev_reg_trsl_addr;
	void __iomem		*infracfg_ao_base;
	void __iomem		*mhccif_rc_base;
};

/* Signature shared by both the hard-IRQ and threaded interrupt callbacks */
typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);

/* struct t7xx_pci_dev - MTK device context structure
 * @intr_handler: array of handler function for request_threaded_irq
 * @intr_thread: array of thread_fn for request_threaded_irq
 * @callback_param: array of cookie passed back to interrupt functions
 * @pdev: PCI device
 * @base_addr: memory base addresses of HW components
 * @md: modem interface
 * @ccmni_ctlb: context structure used to control the network data path
 * @rgu_pci_irq_en: RGU callback ISR registered and active
 */
struct t7xx_pci_dev {
	t7xx_intr_callback	intr_handler[EXT_INT_NUM];
	t7xx_intr_callback	intr_thread[EXT_INT_NUM];
	void			*callback_param[EXT_INT_NUM];
	struct pci_dev		*pdev;
	struct t7xx_addr_base	base_addr;
	struct t7xx_modem	*md;
	struct t7xx_ccmni_ctrl	*ccmni_ctlb;
	bool			rgu_pci_irq_en;
};

#endif /* __T7XX_PCI_H__ */
+262
drivers/net/wwan/t7xx/t7xx_pcie_mac.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 */

#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>

#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"

/* Window used by the RC (host) to reach device registers */
#define T7XX_PCIE_REG_BAR		2
#define T7XX_PCIE_REG_PORT		ATR_SRC_PCI_WIN0
#define T7XX_PCIE_REG_TABLE_NUM		0
#define T7XX_PCIE_REG_TRSL_PORT		ATR_DST_AXIM_0

/* Windows used by the EP (device DMA) to reach host memory; configured
 * transparent, i.e. no address translation is applied.
 */
#define T7XX_PCIE_DEV_DMA_PORT_START	ATR_SRC_AXIS_0
#define T7XX_PCIE_DEV_DMA_PORT_END	ATR_SRC_AXIS_2
#define T7XX_PCIE_DEV_DMA_TABLE_NUM	0
#define T7XX_PCIE_DEV_DMA_TRSL_ADDR	0
#define T7XX_PCIE_DEV_DMA_SRC_ADDR	0
#define T7XX_PCIE_DEV_DMA_TRANSPARENT	1
#define T7XX_PCIE_DEV_DMA_SIZE		0

/* Source ports of the address translation unit (ATR) */
enum t7xx_atr_src_port {
	ATR_SRC_PCI_WIN0,
	ATR_SRC_PCI_WIN1,
	ATR_SRC_AXIS_0,
	ATR_SRC_AXIS_1,
	ATR_SRC_AXIS_2,
	ATR_SRC_AXIS_3,
};

/* Destination ports of the ATR; note the gap between PCI_CONFIG (1) and
 * AXIM_0 (4) is dictated by the hardware encoding.
 */
enum t7xx_atr_dst_port {
	ATR_DST_PCI_TRX,
	ATR_DST_PCI_CONFIG,
	ATR_DST_AXIM_0 = 4,
	ATR_DST_AXIM_1,
	ATR_DST_AXIM_2,
	ATR_DST_AXIM_3,
};

/* struct t7xx_atr_config - one ATR table entry to be programmed
 * @src_addr: window base on the source side
 * @trsl_addr: translated base on the destination side
 * @size: window size in bytes; the alignment checks below assume this is a
 *        power of two (size - 1 is used as a mask) -- TODO confirm callers
 * @port: source port (enum t7xx_atr_src_port)
 * @table: table index within the port (0..ATR_TABLE_NUM_PER_ATR - 1)
 * @trsl_id: destination port
 * @transparent: non-zero to pass addresses through untranslated
 */
struct t7xx_atr_config {
	u64			src_addr;
	u64			trsl_addr;
	u64			size;
	u32			port;
	u32			table;
	enum t7xx_atr_dst_port	trsl_id;
	u32			transparent;
};

/* Disable all translation tables of one source port by clearing each
 * table's SRC_ADDR parameter register (bit 0 of that register acts as the
 * entry enable, see t7xx_pcie_mac_atr_cfg()).
 */
static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port)
{
	void __iomem *reg;
	int i, offset;

	for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
		offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
		reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
		iowrite64(0, reg);
	}
}

/* Program one ATR table entry from @cfg.
 *
 * Return: 0 on success, -EINVAL if src/trsl addresses are not aligned to
 * the (power-of-two) window size.
 */
static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	void __iomem *pbase = IREG_BASE(t7xx_dev);
	int atr_size, pos, offset;
	void __iomem *reg;
	u64 value;

	if (cfg->transparent) {
		/* No address conversion is performed */
		atr_size = ATR_TRANSPARENT_SIZE;
	} else {
		if (cfg->src_addr & (cfg->size - 1)) {
			dev_err(dev, "Source address is not aligned to size\n");
			return -EINVAL;
		}

		if (cfg->trsl_addr & (cfg->size - 1)) {
			dev_err(dev, "Translation address %llx is not aligned to size %llx\n",
				cfg->trsl_addr, cfg->size - 1);
			return -EINVAL;
		}

		/* size == 2^pos; see undefined-for-zero caveat of __ffs64():
		 * callers never pass size 0 with transparent == 0.
		 */
		pos = __ffs64(cfg->size);

		/* HW calculates the address translation space as 2^(atr_size + 1) */
		atr_size = pos - 1;
	}

	offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table;

	reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
	value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
	iowrite64(value, reg);

	reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
	iowrite32(cfg->trsl_id, reg);

	/* SRC_ADDR register layout: bits 63..12 window base, bits 6..1 size
	 * exponent, bit 0 entry enable (cleared by ..._atr_tables_dis()).
	 */
	reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
	value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
	iowrite64(value, reg);

	/* Ensure ATR is set: read back to flush the posted MMIO write */
	ioread64(reg);
	return 0;
}

/**
 * t7xx_pcie_mac_atr_init() - Initialize address translation.
 * @t7xx_dev: MTK device.
 *
 * Setup ATR for ports & device. First all source ports are disabled, then
 * one window is programmed for RC register access and transparent windows
 * are programmed for EP DMA into host memory.
 */
void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_atr_config cfg;
	u32 i;

	/* Disable for all ports */
	for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++)
		t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i);

	memset(&cfg, 0, sizeof(cfg));
	/* Config ATR for RC to access device's register */
	cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR);
	cfg.size = T7XX_PCIE_REG_SIZE_CHIP;
	cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
	cfg.port = T7XX_PCIE_REG_PORT;
	cfg.table = T7XX_PCIE_REG_TABLE_NUM;
	cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT;
	t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
	t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);

	t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;

	/* Config ATR for EP to access RC's memory */
	for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) {
		cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR;
		cfg.size = T7XX_PCIE_DEV_DMA_SIZE;
		cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR;
		cfg.port = i;
		cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM;
		cfg.trsl_id = ATR_DST_PCI_TRX;
		cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT;
		t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
		t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
	}
}

/**
 * t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts.
 * @t7xx_dev: MTK device.
 * @enable: Enable/disable.
 *
 * Enable or disable device interrupts. Read-modify-write of the global
 * interrupt disable bit in ISTAT_HST_CTRL.
 */
static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
	u32 value;

	value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);

	if (enable)
		value &= ~ISTAT_HST_CTRL_DIS;
	else
		value |= ISTAT_HST_CTRL_DIS;

	iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
}

void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pcie_mac_enable_disable_int(t7xx_dev, true);
}

void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pcie_mac_enable_disable_int(t7xx_dev, false);
}

/**
 * t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type.
 * @t7xx_dev: MTK device.
 * @int_type: Interrupt type.
 * @clear: Clear/set.
 *
 * Clear or set device interrupt by type. The CLR/SET registers are
 * write-one-to-act, so no read-modify-write is needed.
 */
static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev,
					enum t7xx_int int_type, bool clear)
{
	void __iomem *reg;
	u32 val;

	if (clear)
		reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0;
	else
		reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0;

	val = BIT(EXT_INT_START + int_type);
	iowrite32(val, reg);
}

void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
{
	t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true);
}

void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
{
	t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false);
}

/**
 * t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type.
 * @t7xx_dev: MTK device.
 * @int_type: Interrupt type.
 *
 * Clear the device interrupt status bit for @int_type by writing one to
 * its position in MSIX_ISTAT_HST_GRP0_0.
 */
void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
{
	void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0;
	u32 val = BIT(EXT_INT_START + int_type);

	iowrite32(val, reg);
}

/**
 * t7xx_pcie_set_mac_msix_cfg() - Write MSIX control configuration.
 * @t7xx_dev: MTK device.
 * @irq_count: Number of MSIX IRQ vectors.
 *
 * Write IRQ count to device. The value written is a device-defined
 * encoding of log2(irq_count); assumes @irq_count is a power of two --
 * TODO confirm against callers.
 */
void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count)
{
	u32 val = ffs(irq_count) * 2 - 1;

	iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX);
}
+31
drivers/net/wwan/t7xx/t7xx_pcie_mac.h
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 */

#ifndef __T7XX_PCIE_MAC_H__
#define __T7XX_PCIE_MAC_H__

#include "t7xx_pci.h"
#include "t7xx_reg.h"

/* Shorthand for the PCIe MAC internal register base of a device */
#define IREG_BASE(t7xx_dev)	((t7xx_dev)->base_addr.pcie_mac_ireg_base)

/* Global interrupt enable/disable */
void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev);
/* Program the address translation (ATR) windows */
void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev);
/* Per-type MSIX mask clear/set and status clear */
void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type);
/* Tell the device how many MSIX vectors are in use */
void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count);

#endif /* __T7XX_PCIE_MAC_H__ */
+104
drivers/net/wwan/t7xx/t7xx_reg.h
#ifndef __T7XX_REG_H__
#define __T7XX_REG_H__

#include <linux/bits.h>

/* Device base address offset */
#define MHCCIF_RC_DEV_BASE			0x10024000

#define REG_RC2EP_SW_BSY			0x04
#define REG_RC2EP_SW_INT_START			0x08

/* Host-to-device software interrupt channel numbers, written to
 * REG_RC2EP_SW_TCHNUM to trigger the corresponding event on the device.
 */
#define REG_RC2EP_SW_TCHNUM			0x0c
#define H2D_CH_EXCEPTION_ACK			1
#define H2D_CH_EXCEPTION_CLEARQ_ACK		2
#define H2D_CH_DS_LOCK				3
/* Channels 4-8 are reserved */
#define H2D_CH_SUSPEND_REQ			9
#define H2D_CH_RESUME_REQ			10
#define H2D_CH_SUSPEND_REQ_AP			11
#define H2D_CH_RESUME_REQ_AP			12
#define H2D_CH_DEVICE_RESET			13
#define H2D_CH_DRM_DISABLE_AP			14

#define REG_EP2RC_SW_INT_STS			0x10
#define REG_EP2RC_SW_INT_ACK			0x14
#define REG_EP2RC_SW_INT_EAP_MASK		0x20
#define REG_EP2RC_SW_INT_EAP_MASK_SET		0x30
#define REG_EP2RC_SW_INT_EAP_MASK_CLR		0x40

/* Device-to-host interrupt status bits (presumably as reported in
 * REG_EP2RC_SW_INT_STS -- verify against the MHCCIF ISR).
 */
#define D2H_INT_DS_LOCK_ACK			BIT(0)
#define D2H_INT_EXCEPTION_INIT			BIT(1)
#define D2H_INT_EXCEPTION_INIT_DONE		BIT(2)
#define D2H_INT_EXCEPTION_CLEARQ_DONE		BIT(3)
#define D2H_INT_EXCEPTION_ALLQ_RESET		BIT(4)
#define D2H_INT_PORT_ENUM			BIT(5)
/* Bits 6-10 are reserved */
#define D2H_INT_SUSPEND_ACK			BIT(11)
#define D2H_INT_RESUME_ACK			BIT(12)
#define D2H_INT_SUSPEND_ACK_AP			BIT(13)
#define D2H_INT_RESUME_ACK_AP			BIT(14)
#define D2H_INT_ASYNC_SAP_HK			BIT(15)
#define D2H_INT_ASYNC_MD_HK			BIT(16)

/* Register base */
#define INFRACFG_AO_DEV_CHIP			0x10001000

/* ATR setting */
#define T7XX_PCIE_REG_TRSL_ADDR_CHIP		0x10000000
#define T7XX_PCIE_REG_SIZE_CHIP			0x00400000

/* Reset Generic Unit (RGU) */
#define TOPRGU_CH_PCIE_IRQ_STA			0x1000790c

#define ATR_PORT_OFFSET				0x100
#define ATR_TABLE_OFFSET			0x20
#define ATR_TABLE_NUM_PER_ATR			8
#define ATR_TRANSPARENT_SIZE			0x3f

/* PCIE_MAC_IREG Register Definition */

#define ISTAT_HST_CTRL				0x01ac
#define ISTAT_HST_CTRL_DIS			BIT(0)

#define T7XX_PCIE_MISC_CTRL			0x0348
#define T7XX_PCIE_MISC_MAC_SLEEP_DIS		BIT(7)

#define T7XX_PCIE_CFG_MSIX			0x03ec
#define ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR	0x0600
#define ATR_PCIE_WIN0_T0_TRSL_ADDR		0x0608
#define ATR_PCIE_WIN0_T0_TRSL_PARAM		0x0610
#define ATR_PCIE_WIN0_ADDR_ALGMT		GENMASK_ULL(63, 12)

#define ATR_SRC_ADDR_INVALID			0x007f

#define T7XX_PCIE_PM_RESUME_STATE		0x0d0c

enum t7xx_pm_resume_state {
	PM_RESUME_REG_STATE_L3,
	PM_RESUME_REG_STATE_L1,
	PM_RESUME_REG_STATE_INIT,
	PM_RESUME_REG_STATE_EXP,
	PM_RESUME_REG_STATE_L2,
	PM_RESUME_REG_STATE_L2_EXP,
};

#define T7XX_PCIE_MISC_DEV_STATUS		0x0d1c
#define MISC_STAGE_MASK				GENMASK(2, 0)
#define MISC_RESET_TYPE_PLDR			BIT(26)
#define MISC_RESET_TYPE_FLDR			BIT(27)
/* Boot stage value (in MISC_STAGE_MASK) meaning the device OS is up */
#define LINUX_STAGE				4

#define T7XX_PCIE_RESOURCE_STATUS		0x0d28
#define T7XX_PCIE_RESOURCE_STS_MSK		GENMASK(4, 0)

#define DISABLE_ASPM_LOWPWR			0x0e50
#define ENABLE_ASPM_LOWPWR			0x0e54
#define T7XX_L1_BIT(i)				BIT((i) * 4 + 1)
#define T7XX_L1_1_BIT(i)			BIT((i) * 4 + 2)
#define T7XX_L1_2_BIT(i)			BIT((i) * 4 + 3)

#define MSIX_ISTAT_HST_GRP0_0			0x0f00
#define IMASK_HOST_MSIX_SET_GRP0_0		0x3000
#define IMASK_HOST_MSIX_CLR_GRP0_0		0x3080
/* External interrupt sources occupy bits 24..31 of the MSIX group-0 regs */
#define EXT_INT_START				24
#define EXT_INT_NUM				8
#define MSIX_MSK_SET_ALL			GENMASK(31, 24)

enum t7xx_int {
	DPMAIF_INT,
	CLDMA0_INT,
+540
drivers/net/wwan/t7xx/t7xx_state_monitor.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2021, MediaTek Inc. 4 + * Copyright (c) 2021-2022, Intel Corporation. 5 + * 6 + * Authors: 7 + * Haijun Liu <haijun.liu@mediatek.com> 8 + * Eliot Lee <eliot.lee@intel.com> 9 + * Moises Veleta <moises.veleta@intel.com> 10 + * Ricardo Martinez <ricardo.martinez@linux.intel.com> 11 + * 12 + * Contributors: 13 + * Amir Hanania <amir.hanania@intel.com> 14 + * Sreehari Kancharla <sreehari.kancharla@intel.com> 15 + */ 16 + 17 + #include <linux/bits.h> 18 + #include <linux/bitfield.h> 19 + #include <linux/completion.h> 20 + #include <linux/device.h> 21 + #include <linux/delay.h> 22 + #include <linux/err.h> 23 + #include <linux/gfp.h> 24 + #include <linux/iopoll.h> 25 + #include <linux/jiffies.h> 26 + #include <linux/kernel.h> 27 + #include <linux/kthread.h> 28 + #include <linux/list.h> 29 + #include <linux/slab.h> 30 + #include <linux/spinlock.h> 31 + #include <linux/string.h> 32 + #include <linux/types.h> 33 + #include <linux/wait.h> 34 + 35 + #include "t7xx_hif_cldma.h" 36 + #include "t7xx_mhccif.h" 37 + #include "t7xx_modem_ops.h" 38 + #include "t7xx_pci.h" 39 + #include "t7xx_pcie_mac.h" 40 + #include "t7xx_reg.h" 41 + #include "t7xx_state_monitor.h" 42 + 43 + #define FSM_DRM_DISABLE_DELAY_MS 200 44 + #define FSM_EVENT_POLL_INTERVAL_MS 20 45 + #define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000 46 + #define FSM_MD_EX_PASS_TIMEOUT_MS 45000 47 + #define FSM_CMD_TIMEOUT_MS 2000 48 + 49 + void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) 50 + { 51 + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 52 + unsigned long flags; 53 + 54 + spin_lock_irqsave(&ctl->notifier_lock, flags); 55 + list_add_tail(&notifier->entry, &ctl->notifier_list); 56 + spin_unlock_irqrestore(&ctl->notifier_lock, flags); 57 + } 58 + 59 + void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) 60 + { 61 + struct t7xx_fsm_notifier *notifier_cur, *notifier_next; 62 + 
struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 63 + unsigned long flags; 64 + 65 + spin_lock_irqsave(&ctl->notifier_lock, flags); 66 + list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) { 67 + if (notifier_cur == notifier) 68 + list_del(&notifier->entry); 69 + } 70 + spin_unlock_irqrestore(&ctl->notifier_lock, flags); 71 + } 72 + 73 + static void fsm_state_notify(struct t7xx_modem *md, enum md_state state) 74 + { 75 + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 76 + struct t7xx_fsm_notifier *notifier; 77 + unsigned long flags; 78 + 79 + spin_lock_irqsave(&ctl->notifier_lock, flags); 80 + list_for_each_entry(notifier, &ctl->notifier_list, entry) { 81 + spin_unlock_irqrestore(&ctl->notifier_lock, flags); 82 + if (notifier->notifier_fn) 83 + notifier->notifier_fn(state, notifier->data); 84 + 85 + spin_lock_irqsave(&ctl->notifier_lock, flags); 86 + } 87 + spin_unlock_irqrestore(&ctl->notifier_lock, flags); 88 + } 89 + 90 + void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) 91 + { 92 + ctl->md_state = state; 93 + fsm_state_notify(ctl->md, state); 94 + } 95 + 96 + static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) 97 + { 98 + if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { 99 + *cmd->ret = result; 100 + complete_all(cmd->done); 101 + } 102 + 103 + kfree(cmd); 104 + } 105 + 106 + static void fsm_del_kf_event(struct t7xx_fsm_event *event) 107 + { 108 + list_del(&event->entry); 109 + kfree(event); 110 + } 111 + 112 + static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl) 113 + { 114 + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; 115 + struct t7xx_fsm_event *event, *evt_next; 116 + struct t7xx_fsm_command *cmd, *cmd_next; 117 + unsigned long flags; 118 + 119 + spin_lock_irqsave(&ctl->command_lock, flags); 120 + list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) { 121 + dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id); 122 + 
list_del(&cmd->entry); 123 + fsm_finish_command(ctl, cmd, -EINVAL); 124 + } 125 + spin_unlock_irqrestore(&ctl->command_lock, flags); 126 + 127 + spin_lock_irqsave(&ctl->event_lock, flags); 128 + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { 129 + dev_warn(dev, "Unhandled event %d\n", event->event_id); 130 + fsm_del_kf_event(event); 131 + } 132 + spin_unlock_irqrestore(&ctl->event_lock, flags); 133 + } 134 + 135 + static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected, 136 + enum t7xx_fsm_event_state event_ignore, int retries) 137 + { 138 + struct t7xx_fsm_event *event; 139 + bool event_received = false; 140 + unsigned long flags; 141 + int cnt = 0; 142 + 143 + while (cnt++ < retries && !event_received) { 144 + bool sleep_required = true; 145 + 146 + if (kthread_should_stop()) 147 + return; 148 + 149 + spin_lock_irqsave(&ctl->event_lock, flags); 150 + event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry); 151 + if (event) { 152 + event_received = event->event_id == event_expected; 153 + if (event_received || event->event_id == event_ignore) { 154 + fsm_del_kf_event(event); 155 + sleep_required = false; 156 + } 157 + } 158 + spin_unlock_irqrestore(&ctl->event_lock, flags); 159 + 160 + if (sleep_required) 161 + msleep(FSM_EVENT_POLL_INTERVAL_MS); 162 + } 163 + } 164 + 165 + static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, 166 + enum t7xx_ex_reason reason) 167 + { 168 + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; 169 + 170 + if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) { 171 + if (cmd) 172 + fsm_finish_command(ctl, cmd, -EINVAL); 173 + 174 + return; 175 + } 176 + 177 + ctl->curr_state = FSM_STATE_EXCEPTION; 178 + 179 + switch (reason) { 180 + case EXCEPTION_HS_TIMEOUT: 181 + dev_err(dev, "Boot Handshake failure\n"); 182 + break; 183 + 184 + case EXCEPTION_EVENT: 185 + dev_err(dev, "Exception 
event\n"); 186 + t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); 187 + t7xx_md_exception_handshake(ctl->md); 188 + 189 + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, 190 + FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); 191 + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID, 192 + FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); 193 + break; 194 + 195 + default: 196 + dev_err(dev, "Exception %d\n", reason); 197 + break; 198 + } 199 + 200 + if (cmd) 201 + fsm_finish_command(ctl, cmd, 0); 202 + } 203 + 204 + static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) 205 + { 206 + ctl->curr_state = FSM_STATE_STOPPED; 207 + 208 + t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); 209 + return t7xx_md_reset(ctl->md->t7xx_dev); 210 + } 211 + 212 + static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) 213 + { 214 + if (ctl->curr_state == FSM_STATE_STOPPED) { 215 + fsm_finish_command(ctl, cmd, -EINVAL); 216 + return; 217 + } 218 + 219 + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); 220 + } 221 + 222 + static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) 223 + { 224 + struct t7xx_pci_dev *t7xx_dev; 225 + struct cldma_ctrl *md_ctrl; 226 + int err; 227 + 228 + if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { 229 + fsm_finish_command(ctl, cmd, -EINVAL); 230 + return; 231 + } 232 + 233 + md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; 234 + t7xx_dev = ctl->md->t7xx_dev; 235 + 236 + ctl->curr_state = FSM_STATE_STOPPING; 237 + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); 238 + t7xx_cldma_stop(md_ctrl); 239 + 240 + if (!ctl->md->rgu_irq_asserted) { 241 + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); 242 + /* Wait for the DRM disable to take effect */ 243 + msleep(FSM_DRM_DISABLE_DELAY_MS); 244 + 245 + err = t7xx_acpi_fldr_func(t7xx_dev); 246 + if (err) 247 + 
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); 248 + } 249 + 250 + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); 251 + } 252 + 253 + static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) 254 + { 255 + if (ctl->md_state != MD_STATE_WAITING_FOR_HS2) 256 + return; 257 + 258 + ctl->md_state = MD_STATE_READY; 259 + 260 + fsm_state_notify(ctl->md, MD_STATE_READY); 261 + } 262 + 263 + static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) 264 + { 265 + struct t7xx_modem *md = ctl->md; 266 + 267 + ctl->curr_state = FSM_STATE_READY; 268 + t7xx_fsm_broadcast_ready_state(ctl); 269 + t7xx_md_event_notify(md, FSM_READY); 270 + } 271 + 272 + static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) 273 + { 274 + struct t7xx_modem *md = ctl->md; 275 + struct device *dev; 276 + 277 + ctl->curr_state = FSM_STATE_STARTING; 278 + 279 + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); 280 + t7xx_md_event_notify(md, FSM_START); 281 + 282 + wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, 283 + HZ * 60); 284 + dev = &md->t7xx_dev->pdev->dev; 285 + 286 + if (ctl->exp_flg) 287 + dev_err(dev, "MD exception is captured during handshake\n"); 288 + 289 + if (!md->core_md.ready) { 290 + dev_err(dev, "MD handshake timeout\n"); 291 + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); 292 + return -ETIMEDOUT; 293 + } 294 + 295 + fsm_routine_ready(ctl); 296 + return 0; 297 + } 298 + 299 + static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) 300 + { 301 + struct t7xx_modem *md = ctl->md; 302 + u32 dev_status; 303 + int ret; 304 + 305 + if (!md) 306 + return; 307 + 308 + if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START && 309 + ctl->curr_state != FSM_STATE_STOPPED) { 310 + fsm_finish_command(ctl, cmd, -EINVAL); 311 + return; 312 + } 313 + 314 + ctl->curr_state = FSM_STATE_PRE_START; 315 + t7xx_md_event_notify(md, FSM_PRE_START); 316 + 317 + ret = 
read_poll_timeout(ioread32, dev_status, 318 + (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000, 319 + false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); 320 + if (ret) { 321 + struct device *dev = &md->t7xx_dev->pdev->dev; 322 + 323 + fsm_finish_command(ctl, cmd, -ETIMEDOUT); 324 + dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK); 325 + return; 326 + } 327 + 328 + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); 329 + fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); 330 + } 331 + 332 + static int fsm_main_thread(void *data) 333 + { 334 + struct t7xx_fsm_ctl *ctl = data; 335 + struct t7xx_fsm_command *cmd; 336 + unsigned long flags; 337 + 338 + while (!kthread_should_stop()) { 339 + if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) || 340 + kthread_should_stop())) 341 + continue; 342 + 343 + if (kthread_should_stop()) 344 + break; 345 + 346 + spin_lock_irqsave(&ctl->command_lock, flags); 347 + cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry); 348 + list_del(&cmd->entry); 349 + spin_unlock_irqrestore(&ctl->command_lock, flags); 350 + 351 + switch (cmd->cmd_id) { 352 + case FSM_CMD_START: 353 + fsm_routine_start(ctl, cmd); 354 + break; 355 + 356 + case FSM_CMD_EXCEPTION: 357 + fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag)); 358 + break; 359 + 360 + case FSM_CMD_PRE_STOP: 361 + fsm_routine_stopping(ctl, cmd); 362 + break; 363 + 364 + case FSM_CMD_STOP: 365 + fsm_routine_stopped(ctl, cmd); 366 + break; 367 + 368 + default: 369 + fsm_finish_command(ctl, cmd, -EINVAL); 370 + fsm_flush_event_cmd_qs(ctl); 371 + break; 372 + } 373 + } 374 + 375 + return 0; 376 + } 377 + 378 + int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) 379 + { 380 + DECLARE_COMPLETION_ONSTACK(done); 381 + struct t7xx_fsm_command *cmd; 382 + unsigned long flags; 383 + int ret; 384 + 385 + cmd = 
kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL); 386 + if (!cmd) 387 + return -ENOMEM; 388 + 389 + INIT_LIST_HEAD(&cmd->entry); 390 + cmd->cmd_id = cmd_id; 391 + cmd->flag = flag; 392 + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { 393 + cmd->done = &done; 394 + cmd->ret = &ret; 395 + } 396 + 397 + spin_lock_irqsave(&ctl->command_lock, flags); 398 + list_add_tail(&cmd->entry, &ctl->command_queue); 399 + spin_unlock_irqrestore(&ctl->command_lock, flags); 400 + 401 + wake_up(&ctl->command_wq); 402 + 403 + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { 404 + unsigned long wait_ret; 405 + 406 + wait_ret = wait_for_completion_timeout(&done, 407 + msecs_to_jiffies(FSM_CMD_TIMEOUT_MS)); 408 + if (!wait_ret) 409 + return -ETIMEDOUT; 410 + 411 + return ret; 412 + } 413 + 414 + return 0; 415 + } 416 + 417 + int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, 418 + unsigned char *data, unsigned int length) 419 + { 420 + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; 421 + struct t7xx_fsm_event *event; 422 + unsigned long flags; 423 + 424 + if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) { 425 + dev_err(dev, "Invalid event %d\n", event_id); 426 + return -EINVAL; 427 + } 428 + 429 + event = kmalloc(sizeof(*event) + length, in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); 430 + if (!event) 431 + return -ENOMEM; 432 + 433 + INIT_LIST_HEAD(&event->entry); 434 + event->event_id = event_id; 435 + event->length = length; 436 + 437 + if (data && length) 438 + memcpy(event->data, data, length); 439 + 440 + spin_lock_irqsave(&ctl->event_lock, flags); 441 + list_add_tail(&event->entry, &ctl->event_queue); 442 + spin_unlock_irqrestore(&ctl->event_lock, flags); 443 + 444 + wake_up_all(&ctl->event_wq); 445 + return 0; 446 + } 447 + 448 + void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id) 449 + { 450 + struct t7xx_fsm_event *event, *evt_next; 451 + unsigned long flags; 452 + 453 + spin_lock_irqsave(&ctl->event_lock, flags); 454 + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { 455 + if (event->event_id == event_id) 456 + fsm_del_kf_event(event); 457 + } 458 + spin_unlock_irqrestore(&ctl->event_lock, flags); 459 + } 460 + 461 + enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl) 462 + { 463 + if (ctl) 464 + return ctl->md_state; 465 + 466 + return MD_STATE_INVALID; 467 + } 468 + 469 + unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl) 470 + { 471 + if (ctl) 472 + return ctl->curr_state; 473 + 474 + return FSM_STATE_STOPPED; 475 + } 476 + 477 + int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type) 478 + { 479 + unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT; 480 + 481 + if (type == MD_IRQ_PORT_ENUM) { 482 + return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags); 483 + } else if (type == MD_IRQ_CCIF_EX) { 484 + ctl->exp_flg = true; 485 + wake_up(&ctl->async_hk_wq); 486 + cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT); 487 + return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags); 488 + } 489 + 490 + return -EINVAL; 491 + } 492 + 493 + void t7xx_fsm_reset(struct t7xx_modem *md) 494 + { 495 + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 496 + 497 + fsm_flush_event_cmd_qs(ctl); 498 + ctl->curr_state = 
FSM_STATE_STOPPED; 499 + ctl->exp_flg = false; 500 + } 501 + 502 + int t7xx_fsm_init(struct t7xx_modem *md) 503 + { 504 + struct device *dev = &md->t7xx_dev->pdev->dev; 505 + struct t7xx_fsm_ctl *ctl; 506 + 507 + ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL); 508 + if (!ctl) 509 + return -ENOMEM; 510 + 511 + md->fsm_ctl = ctl; 512 + ctl->md = md; 513 + ctl->curr_state = FSM_STATE_INIT; 514 + INIT_LIST_HEAD(&ctl->command_queue); 515 + INIT_LIST_HEAD(&ctl->event_queue); 516 + init_waitqueue_head(&ctl->async_hk_wq); 517 + init_waitqueue_head(&ctl->event_wq); 518 + INIT_LIST_HEAD(&ctl->notifier_list); 519 + init_waitqueue_head(&ctl->command_wq); 520 + spin_lock_init(&ctl->event_lock); 521 + spin_lock_init(&ctl->command_lock); 522 + ctl->exp_flg = false; 523 + spin_lock_init(&ctl->notifier_lock); 524 + 525 + ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm"); 526 + return PTR_ERR_OR_ZERO(ctl->fsm_thread); 527 + } 528 + 529 + void t7xx_fsm_uninit(struct t7xx_modem *md) 530 + { 531 + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 532 + 533 + if (!ctl) 534 + return; 535 + 536 + if (ctl->fsm_thread) 537 + kthread_stop(ctl->fsm_thread); 538 + 539 + fsm_flush_event_cmd_qs(ctl); 540 + }
+133
drivers/net/wwan/t7xx/t7xx_state_monitor.h
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *
 * Contributors:
 *  Eliot Lee <eliot.lee@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#ifndef __T7XX_MONITOR_H__
#define __T7XX_MONITOR_H__

#include <linux/bits.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_modem_ops.h"

/* States of the host-side modem state machine */
enum t7xx_fsm_state {
	FSM_STATE_INIT,
	FSM_STATE_PRE_START,
	FSM_STATE_STARTING,
	FSM_STATE_READY,
	FSM_STATE_EXCEPTION,
	FSM_STATE_STOPPING,
	FSM_STATE_STOPPED,
};

/* Events delivered to the FSM via t7xx_fsm_append_event() */
enum t7xx_fsm_event_state {
	FSM_EVENT_INVALID,
	FSM_EVENT_MD_EX,
	FSM_EVENT_MD_EX_REC_OK,
	FSM_EVENT_MD_EX_PASS,
	FSM_EVENT_MAX
};

/* Commands processed by the FSM worker thread */
enum t7xx_fsm_cmd_state {
	FSM_CMD_INVALID,
	FSM_CMD_START,
	FSM_CMD_EXCEPTION,
	FSM_CMD_PRE_STOP,
	FSM_CMD_STOP,
};

/* Reason carried by an FSM_CMD_EXCEPTION command */
enum t7xx_ex_reason {
	EXCEPTION_HS_TIMEOUT,
	EXCEPTION_EVENT,
};

/* Modem interrupt types fed into t7xx_fsm_recv_md_intr() */
enum t7xx_md_irq_type {
	MD_IRQ_WDT,
	MD_IRQ_CCIF_EX,
	MD_IRQ_PORT_ENUM,
};

/* Modem state broadcast to registered notifiers */
enum md_state {
	MD_STATE_INVALID,
	MD_STATE_WAITING_FOR_HS1,
	MD_STATE_WAITING_FOR_HS2,
	MD_STATE_READY,
	MD_STATE_EXCEPTION,
	MD_STATE_WAITING_TO_STOP,
	MD_STATE_STOPPED,
};

/* Command flags: caller blocks for the result / allocation context hint,
 * plus the field carrying enum t7xx_ex_reason for FSM_CMD_EXCEPTION.
 */
#define FSM_CMD_FLAG_WAIT_FOR_COMPLETION	BIT(0)
#define FSM_CMD_FLAG_FLIGHT_MODE		BIT(1)
#define FSM_CMD_FLAG_IN_INTERRUPT		BIT(2)
#define FSM_CMD_EX_REASON			GENMASK(23, 16)

/* Per-modem FSM context: queues, their locks, the worker thread and the
 * current machine/modem states.
 */
struct t7xx_fsm_ctl {
	struct t7xx_modem	*md;
	enum md_state		md_state;
	unsigned int		curr_state;
	struct list_head	command_queue;
	struct list_head	event_queue;
	wait_queue_head_t	command_wq;
	wait_queue_head_t	event_wq;
	wait_queue_head_t	async_hk_wq;
	spinlock_t		event_lock;		/* Protects event queue */
	spinlock_t		command_lock;		/* Protects command queue */
	struct task_struct	*fsm_thread;
	bool			exp_flg;
	spinlock_t		notifier_lock;		/* Protects notifier list */
	struct list_head	notifier_list;
};

/* Queued event; @data is a flexible array holding @length payload bytes */
struct t7xx_fsm_event {
	struct list_head	entry;
	enum t7xx_fsm_event_state event_id;
	unsigned int		length;
	unsigned char		data[];
};

/* Queued command; @done/@ret are only valid when
 * FSM_CMD_FLAG_WAIT_FOR_COMPLETION is set in @flag.
 */
struct t7xx_fsm_command {
	struct list_head	entry;
	enum t7xx_fsm_cmd_state	cmd_id;
	unsigned int		flag;
	struct completion	*done;
	int			*ret;
};

/* Callback invoked on every modem state broadcast */
struct t7xx_fsm_notifier {
	struct list_head	entry;
	int (*notifier_fn)(enum md_state state, void *data);
	void			*data;
};

int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id,
			unsigned int flag);
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length);
void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id);
void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state);
void t7xx_fsm_reset(struct t7xx_modem *md);
int t7xx_fsm_init(struct t7xx_modem *md);
void t7xx_fsm_uninit(struct t7xx_modem *md);
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type);
enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl);
unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl);
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier);
void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier);

#endif /* __T7XX_MONITOR_H__ */