Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

wifi: mt76: move wed common utilities in wed.c

Introduce wed.c in order to collect mt76 wed common codebase used by
mt7915 and mt7996 drivers.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>

Authored by Lorenzo Bianconi, committed by Felix Fietkau
8a7386e7 7b4f9cd6

+268 -252
+1 -1
drivers/net/wireless/mediatek/mt76/Makefile
··· 10 10 11 11 mt76-y := \ 12 12 mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \ 13 - tx.o agg-rx.o mcu.o 13 + tx.o agg-rx.o mcu.o wed.o 14 14 15 15 mt76-$(CONFIG_PCI) += pci.o 16 16 mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
+7 -99
drivers/net/wireless/mediatek/mt76/dma.c
··· 197 197 q->tail = q->head; 198 198 } 199 199 200 - static void 201 - __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, 202 - bool reset_idx) 200 + void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, 201 + bool reset_idx) 203 202 { 204 203 if (!q || !q->ndesc) 205 204 return; ··· 218 219 mt76_dma_sync_idx(dev, q); 219 220 } 220 221 221 - static void 222 - mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) 222 + void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) 223 223 { 224 224 __mt76_dma_queue_reset(dev, q, true); 225 225 } ··· 630 632 return ret; 631 633 } 632 634 633 - static int 634 - mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, 635 - bool allow_direct) 635 + int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, 636 + bool allow_direct) 636 637 { 637 638 int len = SKB_WITH_OVERHEAD(q->buf_size); 638 639 int frames = 0; ··· 678 681 return frames; 679 682 } 680 683 681 - int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) 682 - { 683 - #ifdef CONFIG_NET_MEDIATEK_SOC_WED 684 - int ret = 0, type, ring; 685 - u16 flags; 686 - 687 - if (!q || !q->ndesc) 688 - return -EINVAL; 689 - 690 - flags = q->flags; 691 - if (!q->wed || !mtk_wed_device_active(q->wed)) 692 - q->flags &= ~MT_QFLAG_WED; 693 - 694 - if (!(q->flags & MT_QFLAG_WED)) 695 - return 0; 696 - 697 - type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags); 698 - ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags); 699 - 700 - switch (type) { 701 - case MT76_WED_Q_TX: 702 - ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs, 703 - reset); 704 - if (!ret) 705 - q->wed_regs = q->wed->tx_ring[ring].reg_base; 706 - break; 707 - case MT76_WED_Q_TXFREE: 708 - /* WED txfree queue needs ring to be initialized before setup */ 709 - q->flags = 0; 710 - mt76_dma_queue_reset(dev, q); 711 - mt76_dma_rx_fill(dev, q, false); 712 - 713 - ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs); 714 - if (!ret) 
715 - q->wed_regs = q->wed->txfree_ring.reg_base; 716 - break; 717 - case MT76_WED_Q_RX: 718 - ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs, 719 - reset); 720 - if (!ret) 721 - q->wed_regs = q->wed->rx_ring[ring].reg_base; 722 - break; 723 - case MT76_WED_RRO_Q_DATA: 724 - q->flags &= ~MT_QFLAG_WED; 725 - __mt76_dma_queue_reset(dev, q, false); 726 - mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs); 727 - q->head = q->ndesc - 1; 728 - q->queued = q->head; 729 - break; 730 - case MT76_WED_RRO_Q_MSDU_PG: 731 - q->flags &= ~MT_QFLAG_WED; 732 - __mt76_dma_queue_reset(dev, q, false); 733 - mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs); 734 - q->head = q->ndesc - 1; 735 - q->queued = q->head; 736 - break; 737 - case MT76_WED_RRO_Q_IND: 738 - q->flags &= ~MT_QFLAG_WED; 739 - mt76_dma_queue_reset(dev, q); 740 - mt76_dma_rx_fill(dev, q, false); 741 - mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs); 742 - break; 743 - default: 744 - ret = -EINVAL; 745 - break; 746 - } 747 - q->flags = flags; 748 - 749 - return ret; 750 - #else 751 - return 0; 752 - #endif 753 - } 754 - EXPORT_SYMBOL_GPL(mt76_dma_wed_setup); 755 - 756 684 static int 757 685 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, 758 686 int idx, int n_desc, int bufsize, ··· 722 800 if (ret) 723 801 return ret; 724 802 725 - ret = mt76_dma_wed_setup(dev, q, false); 803 + ret = mt76_wed_dma_setup(dev, q, false); 726 804 if (ret) 727 805 return ret; 728 806 ··· 785 863 mt76_dma_rx_cleanup(dev, q); 786 864 787 865 /* reset WED rx queues */ 788 - mt76_dma_wed_setup(dev, q, true); 866 + mt76_wed_dma_setup(dev, q, true); 789 867 790 868 if (mt76_queue_is_wed_tx_free(q)) 791 869 return; ··· 975 1053 dev->queue_ops = &mt76_dma_ops; 976 1054 } 977 1055 EXPORT_SYMBOL_GPL(mt76_dma_attach); 978 - 979 - void mt76_dma_wed_reset(struct mt76_dev *dev) 980 - { 981 - struct mt76_mmio *mmio = &dev->mmio; 982 - 983 - if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state)) 984 - return; 985 
- 986 - complete(&mmio->wed_reset); 987 - 988 - if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ)) 989 - dev_err(dev->dev, "wed reset complete timeout\n"); 990 - } 991 - EXPORT_SYMBOL_GPL(mt76_dma_wed_reset); 992 1056 993 1057 void mt76_dma_cleanup(struct mt76_dev *dev) 994 1058 {
+6 -3
drivers/net/wireless/mediatek/mt76/dma.h
··· 79 79 int mt76_dma_rx_poll(struct napi_struct *napi, int budget); 80 80 void mt76_dma_attach(struct mt76_dev *dev); 81 81 void mt76_dma_cleanup(struct mt76_dev *dev); 82 - int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset); 83 - void mt76_dma_wed_reset(struct mt76_dev *dev); 82 + int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, 83 + bool allow_direct); 84 + void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, 85 + bool reset_idx); 86 + void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q); 84 87 85 88 static inline void 86 89 mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q) 87 90 { 88 91 dev->queue_ops->reset_q(dev, q); 89 92 if (mtk_wed_device_active(&dev->mmio.wed)) 90 - mt76_dma_wed_setup(dev, q, true); 93 + mt76_wed_dma_setup(dev, q, true); 91 94 } 92 95 93 96 static inline void
-16
drivers/net/wireless/mediatek/mt76/mac80211.c
··· 1854 1854 return MT_DFS_STATE_ACTIVE; 1855 1855 } 1856 1856 EXPORT_SYMBOL_GPL(mt76_phy_dfs_state); 1857 - 1858 - #ifdef CONFIG_NET_MEDIATEK_SOC_WED 1859 - int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1860 - struct net_device *netdev, enum tc_setup_type type, 1861 - void *type_data) 1862 - { 1863 - struct mt76_phy *phy = hw->priv; 1864 - struct mtk_wed_device *wed = &phy->dev->mmio.wed; 1865 - 1866 - if (!mtk_wed_device_active(wed)) 1867 - return -EOPNOTSUPP; 1868 - 1869 - return mtk_wed_device_setup_tc(wed, netdev, type, type_data); 1870 - } 1871 - EXPORT_SYMBOL_GPL(mt76_net_setup_tc); 1872 - #endif /* CONFIG_NET_MEDIATEK_SOC_WED */
-107
drivers/net/wireless/mediatek/mt76/mmio.c
··· 85 85 }
86 86 EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
87 87
88 - #ifdef CONFIG_NET_MEDIATEK_SOC_WED
89 - void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
90 - {
91 - struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
92 - int i;
93 -
94 - for (i = 0; i < dev->rx_token_size; i++) {
95 - struct mt76_txwi_cache *t;
96 -
97 - t = mt76_rx_token_release(dev, i);
98 - if (!t || !t->ptr)
99 - continue;
100 -
101 - mt76_put_page_pool_buf(t->ptr, false);
102 - t->ptr = NULL;
103 -
104 - mt76_put_rxwi(dev, t);
105 - }
106 -
107 - mt76_free_pending_rxwi(dev);
108 - }
109 - EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
110 -
111 - u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
112 - {
113 - struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
114 - struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
115 - struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
116 - int i, len = SKB_WITH_OVERHEAD(q->buf_size);
117 - struct mt76_txwi_cache *t = NULL;
118 -
119 - for (i = 0; i < size; i++) {
120 - enum dma_data_direction dir;
121 - dma_addr_t addr;
122 - u32 offset;
123 - int token;
124 - void *buf;
125 -
126 - t = mt76_get_rxwi(dev);
127 - if (!t)
128 - goto unmap;
129 -
130 - buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
131 - if (!buf)
132 - goto unmap;
133 -
134 - addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
135 - dir = page_pool_get_dma_dir(q->page_pool);
136 - dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
137 -
138 - desc->buf0 = cpu_to_le32(addr);
139 - token = mt76_rx_token_consume(dev, buf, t, addr);
140 - if (token < 0) {
141 - mt76_put_page_pool_buf(buf, false);
142 - goto unmap;
143 - }
144 -
145 - token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
146 - #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
147 - token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
148 - #endif
149 - desc->token |= cpu_to_le32(token);
150 - desc++;
151 - }
152 -
153 - return 0;
154 -
155 - unmap:
156 - if (t)
157 - mt76_put_rxwi(dev, t);
158 - mt76_mmio_wed_release_rx_buf(wed);
159 -
160 - return -ENOMEM;
161 - }
162 - EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
163 -
164 - int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
165 - {
166 - struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
167 -
168 - spin_lock_bh(&dev->token_lock);
169 - dev->token_size = wed->wlan.token_start;
170 - spin_unlock_bh(&dev->token_lock);
171 -
172 - return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
173 - }
174 - EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
175 -
176 - void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
177 - {
178 - struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
179 -
180 - spin_lock_bh(&dev->token_lock);
181 - dev->token_size = dev->drv->token_size;
182 - spin_unlock_bh(&dev->token_lock);
183 - }
184 - EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
185 -
186 - void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
187 - {
188 - struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
189 -
190 - complete(&dev->mmio.wed_reset_complete);
191 - }
192 - EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
193 - #endif /*CONFIG_NET_MEDIATEK_SOC_WED */
194 -
195 88 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
196 89 {
197 90 static const struct mt76_bus_ops mt76_mmio_ops = {
+27 -12
drivers/net/wireless/mediatek/mt76/mt76.h
··· 1082 1082 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
1083 1083 void mt76_pci_disable_aspm(struct pci_dev *pdev);
1084 1084
1085 - #ifdef CONFIG_NET_MEDIATEK_SOC_WED
1086 - int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1087 - struct net_device *netdev, enum tc_setup_type type,
1088 - void *type_data);
1089 - #endif /*CONFIG_NET_MEDIATEK_SOC_WED */
1090 -
1091 1085 static inline u16 mt76_chip(struct mt76_dev *dev)
1092 1086 {
1093 1087 return dev->rev >> 16;
··· 1092 1098 return dev->rev & 0xffff;
1093 1099 }
1094 1100
1101 + void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
1102 + void mt76_wed_offload_disable(struct mtk_wed_device *wed);
1103 + void mt76_wed_reset_complete(struct mtk_wed_device *wed);
1104 + void mt76_wed_dma_reset(struct mt76_dev *dev);
1105 + int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1106 + struct net_device *netdev, enum tc_setup_type type,
1107 + void *type_data);
1095 1108 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
1096 - u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
1097 - void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
1098 - int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
1099 - void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
1100 - void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed);
1101 - #endif /*CONFIG_NET_MEDIATEK_SOC_WED */
1109 + u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
1110 + int mt76_wed_offload_enable(struct mtk_wed_device *wed);
1111 + int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
1112 + #else
1113 + static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
1114 + {
1115 + return 0;
1116 + }
1117 +
1118 + static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
1119 + {
1120 + return 0;
1121 + }
1122 +
1123 + static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
1124 + bool reset)
1125 + {
1126 + return 0;
1127 + }
1128 + #endif /* CONFIG_NET_MEDIATEK_SOC_WED */
1102 1129
1103 1130 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
1104 1131 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+1 -1
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
··· 614 614 mtk_wed_device_dma_reset(wed); 615 615 616 616 mt7915_dma_disable(dev, force); 617 - mt76_dma_wed_reset(&dev->mt76); 617 + mt76_wed_dma_reset(&dev->mt76); 618 618 619 619 /* reset hw queues */ 620 620 for (i = 0; i < __MT_TXQ_MAX; i++) {
+1 -1
drivers/net/wireless/mediatek/mt76/mt7915/main.c
··· 1708 1708 .set_radar_background = mt7915_set_radar_background, 1709 1709 #ifdef CONFIG_NET_MEDIATEK_SOC_WED 1710 1710 .net_fill_forward_path = mt7915_net_fill_forward_path, 1711 - .net_setup_tc = mt76_net_setup_tc, 1711 + .net_setup_tc = mt76_wed_net_setup_tc, 1712 1712 #endif 1713 1713 };
+5 -5
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
··· 706 706 } 707 707 708 708 wed->wlan.init_buf = mt7915_wed_init_buf; 709 - wed->wlan.offload_enable = mt76_mmio_wed_offload_enable; 710 - wed->wlan.offload_disable = mt76_mmio_wed_offload_disable; 711 - wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf; 712 - wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf; 709 + wed->wlan.offload_enable = mt76_wed_offload_enable; 710 + wed->wlan.offload_disable = mt76_wed_offload_disable; 711 + wed->wlan.init_rx_buf = mt76_wed_init_rx_buf; 712 + wed->wlan.release_rx_buf = mt76_wed_release_rx_buf; 713 713 wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats; 714 714 wed->wlan.reset = mt7915_mmio_wed_reset; 715 - wed->wlan.reset_complete = mt76_mmio_wed_reset_complete; 715 + wed->wlan.reset_complete = mt76_wed_reset_complete; 716 716 717 717 dev->mt76.rx_token_size = wed->wlan.rx_npkt; 718 718
+1 -1
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
··· 695 695 mtk_wed_device_dma_reset(&dev->mt76.mmio.wed); 696 696 697 697 mt7996_dma_disable(dev, force); 698 - mt76_dma_wed_reset(&dev->mt76); 698 + mt76_wed_dma_reset(&dev->mt76); 699 699 700 700 /* reset hw queues */ 701 701 for (i = 0; i < __MT_TXQ_MAX; i++) {
+1 -1
drivers/net/wireless/mediatek/mt76/mt7996/main.c
··· 1502 1502 .set_radar_background = mt7996_set_radar_background, 1503 1503 #ifdef CONFIG_NET_MEDIATEK_SOC_WED 1504 1504 .net_fill_forward_path = mt7996_net_fill_forward_path, 1505 - .net_setup_tc = mt76_net_setup_tc, 1505 + .net_setup_tc = mt76_wed_net_setup_tc, 1506 1506 #endif 1507 1507 };
+5 -5
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
··· 410 410 wed->wlan.amsdu_max_len = 1536; 411 411 412 412 wed->wlan.init_buf = mt7996_wed_init_buf; 413 - wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf; 414 - wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf; 415 - wed->wlan.offload_enable = mt76_mmio_wed_offload_enable; 416 - wed->wlan.offload_disable = mt76_mmio_wed_offload_disable; 413 + wed->wlan.init_rx_buf = mt76_wed_init_rx_buf; 414 + wed->wlan.release_rx_buf = mt76_wed_release_rx_buf; 415 + wed->wlan.offload_enable = mt76_wed_offload_enable; 416 + wed->wlan.offload_disable = mt76_wed_offload_disable; 417 417 if (!hif2) { 418 418 wed->wlan.reset = mt7996_mmio_wed_reset; 419 - wed->wlan.reset_complete = mt76_mmio_wed_reset_complete; 419 + wed->wlan.reset_complete = mt76_wed_reset_complete; 420 420 } 421 421 422 422 if (mtk_wed_device_attach(wed))
+213
drivers/net/wireless/mediatek/mt76/wed.c
··· 1 + // SPDX-License-Identifier: ISC 2 + /* 3 + * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org> 4 + */ 5 + 6 + #include "mt76.h" 7 + #include "dma.h" 8 + 9 + void mt76_wed_release_rx_buf(struct mtk_wed_device *wed) 10 + { 11 + struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed); 12 + int i; 13 + 14 + for (i = 0; i < dev->rx_token_size; i++) { 15 + struct mt76_txwi_cache *t; 16 + 17 + t = mt76_rx_token_release(dev, i); 18 + if (!t || !t->ptr) 19 + continue; 20 + 21 + mt76_put_page_pool_buf(t->ptr, false); 22 + t->ptr = NULL; 23 + 24 + mt76_put_rxwi(dev, t); 25 + } 26 + 27 + mt76_free_pending_rxwi(dev); 28 + } 29 + EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf); 30 + 31 + #ifdef CONFIG_NET_MEDIATEK_SOC_WED 32 + u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size) 33 + { 34 + struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed); 35 + struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc; 36 + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; 37 + int i, len = SKB_WITH_OVERHEAD(q->buf_size); 38 + struct mt76_txwi_cache *t = NULL; 39 + 40 + for (i = 0; i < size; i++) { 41 + enum dma_data_direction dir; 42 + dma_addr_t addr; 43 + u32 offset; 44 + int token; 45 + void *buf; 46 + 47 + t = mt76_get_rxwi(dev); 48 + if (!t) 49 + goto unmap; 50 + 51 + buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); 52 + if (!buf) 53 + goto unmap; 54 + 55 + addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset; 56 + dir = page_pool_get_dma_dir(q->page_pool); 57 + dma_sync_single_for_device(dev->dma_dev, addr, len, dir); 58 + 59 + desc->buf0 = cpu_to_le32(addr); 60 + token = mt76_rx_token_consume(dev, buf, t, addr); 61 + if (token < 0) { 62 + mt76_put_page_pool_buf(buf, false); 63 + goto unmap; 64 + } 65 + 66 + token = FIELD_PREP(MT_DMA_CTL_TOKEN, token); 67 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 68 + token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32); 69 + #endif 70 + desc->token |= cpu_to_le32(token); 71 + desc++; 72 + } 73 + 74 + 
return 0; 75 + 76 + unmap: 77 + if (t) 78 + mt76_put_rxwi(dev, t); 79 + mt76_wed_release_rx_buf(wed); 80 + 81 + return -ENOMEM; 82 + } 83 + EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf); 84 + 85 + int mt76_wed_offload_enable(struct mtk_wed_device *wed) 86 + { 87 + struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed); 88 + 89 + spin_lock_bh(&dev->token_lock); 90 + dev->token_size = wed->wlan.token_start; 91 + spin_unlock_bh(&dev->token_lock); 92 + 93 + return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ); 94 + } 95 + EXPORT_SYMBOL_GPL(mt76_wed_offload_enable); 96 + 97 + int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) 98 + { 99 + int ret = 0, type, ring; 100 + u16 flags; 101 + 102 + if (!q || !q->ndesc) 103 + return -EINVAL; 104 + 105 + flags = q->flags; 106 + if (!q->wed || !mtk_wed_device_active(q->wed)) 107 + q->flags &= ~MT_QFLAG_WED; 108 + 109 + if (!(q->flags & MT_QFLAG_WED)) 110 + return 0; 111 + 112 + type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags); 113 + ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags); 114 + 115 + switch (type) { 116 + case MT76_WED_Q_TX: 117 + ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs, 118 + reset); 119 + if (!ret) 120 + q->wed_regs = q->wed->tx_ring[ring].reg_base; 121 + break; 122 + case MT76_WED_Q_TXFREE: 123 + /* WED txfree queue needs ring to be initialized before setup */ 124 + q->flags = 0; 125 + mt76_dma_queue_reset(dev, q); 126 + mt76_dma_rx_fill(dev, q, false); 127 + 128 + ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs); 129 + if (!ret) 130 + q->wed_regs = q->wed->txfree_ring.reg_base; 131 + break; 132 + case MT76_WED_Q_RX: 133 + ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs, 134 + reset); 135 + if (!ret) 136 + q->wed_regs = q->wed->rx_ring[ring].reg_base; 137 + break; 138 + case MT76_WED_RRO_Q_DATA: 139 + q->flags &= ~MT_QFLAG_WED; 140 + __mt76_dma_queue_reset(dev, q, false); 141 + mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs); 142 + 
q->head = q->ndesc - 1; 143 + q->queued = q->head; 144 + break; 145 + case MT76_WED_RRO_Q_MSDU_PG: 146 + q->flags &= ~MT_QFLAG_WED; 147 + __mt76_dma_queue_reset(dev, q, false); 148 + mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs); 149 + q->head = q->ndesc - 1; 150 + q->queued = q->head; 151 + break; 152 + case MT76_WED_RRO_Q_IND: 153 + q->flags &= ~MT_QFLAG_WED; 154 + mt76_dma_queue_reset(dev, q); 155 + mt76_dma_rx_fill(dev, q, false); 156 + mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs); 157 + break; 158 + default: 159 + ret = -EINVAL; 160 + break; 161 + } 162 + q->flags = flags; 163 + 164 + return ret; 165 + } 166 + EXPORT_SYMBOL_GPL(mt76_wed_dma_setup); 167 + #endif /*CONFIG_NET_MEDIATEK_SOC_WED */ 168 + 169 + void mt76_wed_offload_disable(struct mtk_wed_device *wed) 170 + { 171 + struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed); 172 + 173 + spin_lock_bh(&dev->token_lock); 174 + dev->token_size = dev->drv->token_size; 175 + spin_unlock_bh(&dev->token_lock); 176 + } 177 + EXPORT_SYMBOL_GPL(mt76_wed_offload_disable); 178 + 179 + void mt76_wed_reset_complete(struct mtk_wed_device *wed) 180 + { 181 + struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed); 182 + 183 + complete(&dev->mmio.wed_reset_complete); 184 + } 185 + EXPORT_SYMBOL_GPL(mt76_wed_reset_complete); 186 + 187 + int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 188 + struct net_device *netdev, enum tc_setup_type type, 189 + void *type_data) 190 + { 191 + struct mt76_phy *phy = hw->priv; 192 + struct mtk_wed_device *wed = &phy->dev->mmio.wed; 193 + 194 + if (!mtk_wed_device_active(wed)) 195 + return -EOPNOTSUPP; 196 + 197 + return mtk_wed_device_setup_tc(wed, netdev, type, type_data); 198 + } 199 + EXPORT_SYMBOL_GPL(mt76_wed_net_setup_tc); 200 + 201 + void mt76_wed_dma_reset(struct mt76_dev *dev) 202 + { 203 + struct mt76_mmio *mmio = &dev->mmio; 204 + 205 + if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state)) 206 + return; 207 
+ 208 + complete(&mmio->wed_reset); 209 + 210 + if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ)) 211 + dev_err(dev->dev, "wed reset complete timeout\n"); 212 + } 213 + EXPORT_SYMBOL_GPL(mt76_wed_dma_reset);