Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: wwan: t7xx: Add AP CLDMA

At the moment, with the current status, t7xx is not functional: after
connection, if there is no activity, problems like this appear:
[ 57.370534] mtk_t7xx 0000:72:00.0: [PM] SAP suspend error: -110
[ 57.370581] mtk_t7xx 0000:72:00.0: can't suspend
(t7xx_pci_pm_runtime_suspend [mtk_t7xx] returned -110)
and after this, the traffic no longer works.

The complete series 'net: wwan: t7xx: fw flashing & coredump support'
was reverted because of issues with the pci implementation.
In order to have at least the modem working, it would be enough if just
the first commit of the series is re-applied:
d20ef656f994 net: wwan: t7xx: Add AP CLDMA
With that, the Application Processor would be controlled and correctly
suspended, and the problems described above would be fixed (I am testing
it here like this with no related issues).

This commit is independent of the others and is not related to the
aforementioned PCI implementation for the new features: fw flashing and
coredump collection.

Use v2 patch version of d20ef656f994 as JinJian Song suggests
(https://patchwork.kernel.org/project/netdevbpf/patch/20230105154215.198828-1-m.chetan.kumar@linux.intel.com/).

Original text from the commit that would be re-applied:

d20ef656f994 net: wwan: t7xx: Add AP CLDMA
Author: Haijun Liu <haijun.liu@mediatek.com>
Date: Tue Aug 16 09:53:28 2022 +0530

The t7xx device contains two Cross Layer DMA (CLDMA) interfaces to
communicate with AP and Modem processors respectively. So far only
MD-CLDMA was being used, this patch enables AP-CLDMA.

Rename small Application Processor (sAP) to AP.

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Co-developed-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Madhusmita Sahu <madhusmita.sahu@intel.com>
Signed-off-by: Moises Veleta <moises.veleta@linux.intel.com>
Signed-off-by: Devegowda Chandrashekar <chandrashekar.devegowda@intel.com>
Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>

Signed-off-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Link: https://lore.kernel.org/r/20230711062817.6108-1-jtornosm@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Jose Ignacio Tornos Martinez and committed by
Jakub Kicinski
ba2274dc c5ec13e3

+116 -31
+11 -6
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
··· 1066 1066 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; 1067 1067 u32 phy_ao_base, phy_pd_base; 1068 1068 1069 - if (md_ctrl->hif_id != CLDMA_ID_MD) 1070 - return; 1071 - 1072 - phy_ao_base = CLDMA1_AO_BASE; 1073 - phy_pd_base = CLDMA1_PD_BASE; 1074 - hw_info->phy_interrupt_id = CLDMA1_INT; 1075 1069 hw_info->hw_mode = MODE_BIT_64; 1070 + 1071 + if (md_ctrl->hif_id == CLDMA_ID_MD) { 1072 + phy_ao_base = CLDMA1_AO_BASE; 1073 + phy_pd_base = CLDMA1_PD_BASE; 1074 + hw_info->phy_interrupt_id = CLDMA1_INT; 1075 + } else { 1076 + phy_ao_base = CLDMA0_AO_BASE; 1077 + phy_pd_base = CLDMA0_PD_BASE; 1078 + hw_info->phy_interrupt_id = CLDMA0_INT; 1079 + } 1080 + 1076 1081 hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, 1077 1082 pbase->pcie_dev_reg_trsl_addr, phy_ao_base); 1078 1083 hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
+1 -1
drivers/net/wwan/t7xx/t7xx_hif_cldma.h
··· 34 34 /** 35 35 * enum cldma_id - Identifiers for CLDMA HW units. 36 36 * @CLDMA_ID_MD: Modem control channel. 37 - * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). 37 + * @CLDMA_ID_AP: Application Processor control channel. 38 38 * @CLDMA_NUM: Number of CLDMA HW units available. 39 39 */ 40 40 enum cldma_id {
+1
drivers/net/wwan/t7xx/t7xx_mhccif.h
··· 25 25 D2H_INT_EXCEPTION_CLEARQ_DONE | \ 26 26 D2H_INT_EXCEPTION_ALLQ_RESET | \ 27 27 D2H_INT_PORT_ENUM | \ 28 + D2H_INT_ASYNC_AP_HK | \ 28 29 D2H_INT_ASYNC_MD_HK) 29 30 30 31 void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val);
+61 -15
drivers/net/wwan/t7xx/t7xx_modem_ops.c
··· 44 44 #include "t7xx_state_monitor.h" 45 45 46 46 #define RT_ID_MD_PORT_ENUM 0 47 + #define RT_ID_AP_PORT_ENUM 1 47 48 /* Modem feature query identification code - "ICCC" */ 48 49 #define MD_FEATURE_QUERY_ID 0x49434343 49 50 ··· 299 298 } 300 299 301 300 t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); 301 + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage); 302 302 303 303 if (stage == HIF_EX_INIT) 304 304 t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); ··· 428 426 if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) 429 427 return -EINVAL; 430 428 431 - if (i == RT_ID_MD_PORT_ENUM) 429 + if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM) 432 430 t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); 433 431 } 434 432 ··· 458 456 return 0; 459 457 } 460 458 461 - static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, 459 + static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info, 460 + struct t7xx_fsm_ctl *ctl, 462 461 enum t7xx_fsm_event_state event_id, 463 462 enum t7xx_fsm_event_state err_detect) 464 463 { 465 464 struct t7xx_fsm_event *event = NULL, *event_next; 466 - struct t7xx_sys_info *core_info = &md->core_md; 467 465 struct device *dev = &md->t7xx_dev->pdev->dev; 468 466 unsigned long flags; 469 467 int ret; ··· 533 531 t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); 534 532 t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); 535 533 md->core_md.handshake_ongoing = true; 536 - t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); 534 + t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); 535 + } 536 + 537 + static void t7xx_ap_hk_wq(struct work_struct *work) 538 + { 539 + struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work); 540 + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 541 + 542 + /* Clear the HS2 EXIT event appended in t7xx_core_reset(). 
*/ 543 + t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT); 544 + t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]); 545 + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]); 546 + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]); 547 + md->core_ap.handshake_ongoing = true; 548 + t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT); 537 549 } 538 550 539 551 void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) 540 552 { 541 553 struct t7xx_fsm_ctl *ctl = md->fsm_ctl; 542 - void __iomem *mhccif_base; 543 554 unsigned int int_sta; 544 555 unsigned long flags; 545 556 546 557 switch (evt_id) { 547 558 case FSM_PRE_START: 548 - t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); 559 + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK | 560 + D2H_INT_ASYNC_AP_HK); 549 561 break; 550 562 551 563 case FSM_START: ··· 572 556 ctl->exp_flg = true; 573 557 md->exp_id &= ~D2H_INT_EXCEPTION_INIT; 574 558 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 559 + md->exp_id &= ~D2H_INT_ASYNC_AP_HK; 575 560 } else if (ctl->exp_flg) { 576 561 md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 577 - } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { 578 - queue_work(md->handshake_wq, &md->handshake_work); 579 - md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 580 - mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; 581 - iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); 582 - t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); 562 + md->exp_id &= ~D2H_INT_ASYNC_AP_HK; 583 563 } else { 584 - t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); 564 + void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; 565 + 566 + if (md->exp_id & D2H_INT_ASYNC_MD_HK) { 567 + queue_work(md->handshake_wq, &md->handshake_work); 568 + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; 569 + iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); 570 + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); 571 + } 572 + 573 + if (md->exp_id & 
D2H_INT_ASYNC_AP_HK) { 574 + queue_work(md->handshake_wq, &md->ap_handshake_work); 575 + md->exp_id &= ~D2H_INT_ASYNC_AP_HK; 576 + iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); 577 + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); 578 + } 585 579 } 586 580 spin_unlock_irqrestore(&md->exp_lock, flags); 587 581 ··· 604 578 605 579 case FSM_READY: 606 580 t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); 581 + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK); 607 582 break; 608 583 609 584 default: ··· 656 629 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; 657 630 md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= 658 631 FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); 632 + 633 + INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq); 634 + md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK; 635 + md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |= 636 + FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); 637 + 659 638 return md; 660 639 } 661 640 ··· 673 640 md->exp_id = 0; 674 641 t7xx_fsm_reset(md); 675 642 t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); 643 + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]); 676 644 t7xx_port_proxy_reset(md->port_prox); 677 645 md->md_init_finish = true; 678 646 return t7xx_core_reset(md); ··· 703 669 if (ret) 704 670 goto err_destroy_hswq; 705 671 672 + ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev); 673 + if (ret) 674 + goto err_destroy_hswq; 675 + 706 676 ret = t7xx_fsm_init(md); 707 677 if (ret) 708 678 goto err_destroy_hswq; ··· 719 681 if (ret) 720 682 goto err_uninit_ccmni; 721 683 722 - ret = t7xx_port_proxy_init(md); 684 + ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]); 723 685 if (ret) 724 686 goto err_uninit_md_cldma; 725 687 688 + ret = t7xx_port_proxy_init(md); 689 + if (ret) 690 + goto err_uninit_ap_cldma; 691 + 726 692 ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); 727 - if (ret) /* fsm_uninit flushes cmd queue */ 693 + if (ret) /* t7xx_fsm_uninit() 
flushes cmd queue */ 728 694 goto err_uninit_proxy; 729 695 730 696 t7xx_md_sys_sw_init(t7xx_dev); ··· 737 695 738 696 err_uninit_proxy: 739 697 t7xx_port_proxy_uninit(md->port_prox); 698 + 699 + err_uninit_ap_cldma: 700 + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); 740 701 741 702 err_uninit_md_cldma: 742 703 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); ··· 767 722 768 723 t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); 769 724 t7xx_port_proxy_uninit(md->port_prox); 725 + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]); 770 726 t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); 771 727 t7xx_ccmni_exit(t7xx_dev); 772 728 t7xx_fsm_uninit(md);
+2
drivers/net/wwan/t7xx/t7xx_modem_ops.h
··· 66 66 struct cldma_ctrl *md_ctrl[CLDMA_NUM]; 67 67 struct t7xx_pci_dev *t7xx_dev; 68 68 struct t7xx_sys_info core_md; 69 + struct t7xx_sys_info core_ap; 69 70 bool md_init_finish; 70 71 bool rgu_irq_asserted; 71 72 struct workqueue_struct *handshake_wq; 72 73 struct work_struct handshake_work; 74 + struct work_struct ap_handshake_work; 73 75 struct t7xx_fsm_ctl *fsm_ctl; 74 76 struct port_proxy *port_prox; 75 77 unsigned int exp_id;
+5 -1
drivers/net/wwan/t7xx/t7xx_port.h
··· 36 36 /* Channel ID and Message ID definitions. 37 37 * The channel number consists of peer_id(15:12) , channel_id(11:0) 38 38 * peer_id: 39 - * 0:reserved, 1: to sAP, 2: to MD 39 + * 0:reserved, 1: to AP, 2: to MD 40 40 */ 41 41 enum port_ch { 42 + /* to AP */ 43 + PORT_CH_AP_CONTROL_RX = 0x1000, 44 + PORT_CH_AP_CONTROL_TX = 0x1001, 45 + 42 46 /* to MD */ 43 47 PORT_CH_CONTROL_RX = 0x2000, 44 48 PORT_CH_CONTROL_TX = 0x2001,
+6 -2
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
··· 167 167 case CTL_ID_HS2_MSG: 168 168 skb_pull(skb, sizeof(*ctrl_msg_h)); 169 169 170 - if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { 171 - ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, 170 + if (port_conf->rx_ch == PORT_CH_CONTROL_RX || 171 + port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) { 172 + int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ? 173 + FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2; 174 + 175 + ret = t7xx_fsm_append_event(ctl, event, skb->data, 172 176 le32_to_cpu(ctrl_msg_h->data_length)); 173 177 if (ret) 174 178 dev_err(port->dev, "Failed to append Handshake 2 event");
+15 -3
drivers/net/wwan/t7xx/t7xx_port_proxy.c
··· 48 48 i < (proxy)->port_count; \ 49 49 i++, (p) = &(proxy)->ports[i]) 50 50 51 - static const struct t7xx_port_conf t7xx_md_port_conf[] = { 51 + static const struct t7xx_port_conf t7xx_port_conf[] = { 52 52 { 53 53 .tx_ch = PORT_CH_UART2_TX, 54 54 .rx_ch = PORT_CH_UART2_RX, ··· 89 89 .path_id = CLDMA_ID_MD, 90 90 .ops = &ctl_port_ops, 91 91 .name = "t7xx_ctrl", 92 + }, { 93 + .tx_ch = PORT_CH_AP_CONTROL_TX, 94 + .rx_ch = PORT_CH_AP_CONTROL_RX, 95 + .txq_index = Q_IDX_CTRL, 96 + .rxq_index = Q_IDX_CTRL, 97 + .path_id = CLDMA_ID_AP, 98 + .ops = &ctl_port_ops, 99 + .name = "t7xx_ap_ctrl", 92 100 }, 93 101 }; 94 102 ··· 436 428 if (port_conf->tx_ch == PORT_CH_CONTROL_TX) 437 429 md->core_md.ctl_port = port; 438 430 431 + if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX) 432 + md->core_ap.ctl_port = port; 433 + 439 434 port->t7xx_dev = md->t7xx_dev; 440 435 port->dev = &md->t7xx_dev->pdev->dev; 441 436 spin_lock_init(&port->port_update_lock); ··· 453 442 454 443 static int t7xx_proxy_alloc(struct t7xx_modem *md) 455 444 { 456 - unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf); 445 + unsigned int port_count = ARRAY_SIZE(t7xx_port_conf); 457 446 struct device *dev = &md->t7xx_dev->pdev->dev; 458 447 struct port_proxy *port_prox; 459 448 int i; ··· 467 456 port_prox->dev = dev; 468 457 469 458 for (i = 0; i < port_count; i++) 470 - port_prox->ports[i].port_conf = &t7xx_md_port_conf[i]; 459 + port_prox->ports[i].port_conf = &t7xx_port_conf[i]; 471 460 472 461 port_prox->port_count = port_count; 473 462 t7xx_proxy_init_all_ports(md); ··· 492 481 if (ret) 493 482 return ret; 494 483 484 + t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb); 495 485 t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); 496 486 return 0; 497 487 }
+1 -1
drivers/net/wwan/t7xx/t7xx_reg.h
··· 56 56 #define D2H_INT_RESUME_ACK BIT(12) 57 57 #define D2H_INT_SUSPEND_ACK_AP BIT(13) 58 58 #define D2H_INT_RESUME_ACK_AP BIT(14) 59 - #define D2H_INT_ASYNC_SAP_HK BIT(15) 59 + #define D2H_INT_ASYNC_AP_HK BIT(15) 60 60 #define D2H_INT_ASYNC_MD_HK BIT(16) 61 61 62 62 /* Register base */
+11 -2
drivers/net/wwan/t7xx/t7xx_state_monitor.c
··· 285 285 t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); 286 286 t7xx_md_event_notify(md, FSM_START); 287 287 288 - wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, 289 - HZ * 60); 288 + wait_event_interruptible_timeout(ctl->async_hk_wq, 289 + (md->core_md.ready && md->core_ap.ready) || 290 + ctl->exp_flg, HZ * 60); 290 291 dev = &md->t7xx_dev->pdev->dev; 291 292 292 293 if (ctl->exp_flg) ··· 297 296 dev_err(dev, "MD handshake timeout\n"); 298 297 if (md->core_md.handshake_ongoing) 299 298 t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); 299 + 300 + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); 301 + return -ETIMEDOUT; 302 + } else if (!md->core_ap.ready) { 303 + dev_err(dev, "AP handshake timeout\n"); 304 + if (md->core_ap.handshake_ongoing) 305 + t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0); 300 306 301 307 fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); 302 308 return -ETIMEDOUT; ··· 343 335 return; 344 336 } 345 337 338 + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]); 346 339 t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); 347 340 fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); 348 341 }
+2
drivers/net/wwan/t7xx/t7xx_state_monitor.h
··· 38 38 enum t7xx_fsm_event_state { 39 39 FSM_EVENT_INVALID, 40 40 FSM_EVENT_MD_HS2, 41 + FSM_EVENT_AP_HS2, 41 42 FSM_EVENT_MD_EX, 42 43 FSM_EVENT_MD_EX_REC_OK, 43 44 FSM_EVENT_MD_EX_PASS, 44 45 FSM_EVENT_MD_HS2_EXIT, 46 + FSM_EVENT_AP_HS2_EXIT, 45 47 FSM_EVENT_MAX 46 48 }; 47 49