Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'rtw-next-2025-09-22' of https://github.com/pkshih/rtw

Ping-Ke Shih says:
==================
rtw-next patches for v6.18

Some small fixes and features are listed below:

rtw88:

* correct LED function

rtw89:

* fix a wait/completion race when sending NULL data

* implement beacon tracking feature

* implement the channel noise reporting function supported by RTL8852A

* correct RTL8851B RF calibration

* preparation of PCI TX/RX rings and interrupts for the upcoming RTL8922DE
==================

Signed-off-by: Johannes Berg <johannes.berg@intel.com>

+2586 -404
+24 -3
drivers/net/wireless/realtek/rtl8xxxu/core.c
··· 1901 1901 priv->efuse_wifi.raw, EFUSE_MAP_LEN, true); 1902 1902 } 1903 1903 1904 + static ssize_t read_file_efuse(struct file *file, char __user *user_buf, 1905 + size_t count, loff_t *ppos) 1906 + { 1907 + struct rtl8xxxu_priv *priv = file_inode(file)->i_private; 1908 + 1909 + return simple_read_from_buffer(user_buf, count, ppos, 1910 + priv->efuse_wifi.raw, EFUSE_MAP_LEN); 1911 + } 1912 + 1913 + static const struct debugfs_short_fops fops_efuse = { 1914 + .read = read_file_efuse, 1915 + }; 1916 + 1917 + static void rtl8xxxu_debugfs_init(struct rtl8xxxu_priv *priv) 1918 + { 1919 + struct dentry *phydir; 1920 + 1921 + phydir = debugfs_create_dir("rtl8xxxu", priv->hw->wiphy->debugfsdir); 1922 + debugfs_create_file("efuse", 0400, phydir, priv, &fops_efuse); 1923 + } 1924 + 1904 1925 void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv) 1905 1926 { 1906 1927 u8 val8; ··· 7836 7815 untested = 0; 7837 7816 break; 7838 7817 case 0x2357: 7839 - if (id->idProduct == 0x0109 || id->idProduct == 0x0135) 7818 + if (id->idProduct == 0x0109 || id->idProduct == 0x010c || 7819 + id->idProduct == 0x0135) 7840 7820 untested = 0; 7841 7821 break; 7842 7822 case 0x0b05: ··· 7996 7974 } 7997 7975 7998 7976 rtl8xxxu_init_led(priv); 7977 + rtl8xxxu_debugfs_init(priv); 7999 7978 8000 7979 return 0; 8001 7980 ··· 8194 8171 {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff), 8195 8172 .driver_info = (unsigned long)&rtl8192cu_fops}, 8196 8173 {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff), 8197 - .driver_info = (unsigned long)&rtl8192cu_fops}, 8198 - {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff), 8199 8174 .driver_info = (unsigned long)&rtl8192cu_fops}, 8200 8175 {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff), 8201 8176 .driver_info = (unsigned long)&rtl8192cu_fops},
-1
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
··· 291 291 {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ 292 292 {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ 293 293 {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ 294 - {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ 295 294 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ 296 295 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ 297 296 {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
+7 -6
drivers/net/wireless/realtek/rtw88/led.c
··· 6 6 #include "debug.h" 7 7 #include "led.h" 8 8 9 - static int rtw_led_set_blocking(struct led_classdev *led, 10 - enum led_brightness brightness) 9 + static int rtw_led_set(struct led_classdev *led, 10 + enum led_brightness brightness) 11 11 { 12 12 struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev); 13 13 14 + mutex_lock(&rtwdev->mutex); 15 + 14 16 rtwdev->chip->ops->led_set(led, brightness); 17 + 18 + mutex_unlock(&rtwdev->mutex); 15 19 16 20 return 0; 17 21 } ··· 40 36 if (!rtwdev->chip->ops->led_set) 41 37 return; 42 38 43 - if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) 44 - led->brightness_set = rtwdev->chip->ops->led_set; 45 - else 46 - led->brightness_set_blocking = rtw_led_set_blocking; 39 + led->brightness_set_blocking = rtw_led_set; 47 40 48 41 snprintf(rtwdev->led_name, sizeof(rtwdev->led_name), 49 42 "rtw88-%s", dev_name(rtwdev->dev));
+4
drivers/net/wireless/realtek/rtw88/sdio.c
··· 144 144 145 145 static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr) 146 146 { 147 + if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) && 148 + !rtw_sdio_is_bus_addr(addr)) 149 + return false; 150 + 147 151 return !rtw_sdio_is_sdio30_supported(rtwdev) || 148 152 rtw_sdio_is_bus_addr(addr); 149 153 }
+10 -1
drivers/net/wireless/realtek/rtw89/chan.c
··· 281 281 { 282 282 struct rtw89_hal *hal = &rtwdev->hal; 283 283 struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; 284 + int i, j; 284 285 285 286 hal->entity_pause = false; 286 287 bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX); ··· 289 288 atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE); 290 289 291 290 INIT_LIST_HEAD(&mgnt->active_list); 291 + 292 + for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) { 293 + for (j = 0; j < __RTW89_MLD_MAX_LINK_NUM; j++) 294 + mgnt->chanctx_tbl[i][j] = RTW89_CHANCTX_IDLE; 295 + } 292 296 293 297 rtw89_config_default_chandef(rtwdev); 294 298 } ··· 359 353 360 354 const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev, 361 355 const char *caller_message, 362 - u8 link_index) 356 + u8 link_index, bool nullchk) 363 357 { 364 358 struct rtw89_hal *hal = &rtwdev->hal; 365 359 struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; ··· 406 400 return rtw89_chan_get(rtwdev, chanctx_idx); 407 401 408 402 dflt: 403 + if (unlikely(nullchk)) 404 + return NULL; 405 + 409 406 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 410 407 "%s (%s): prefetch NULL on link index %u\n", 411 408 __func__, caller_message ?: "", link_index);
+8 -2
drivers/net/wireless/realtek/rtw89/chan.h
··· 180 180 181 181 const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev, 182 182 const char *caller_message, 183 - u8 link_index); 183 + u8 link_index, bool nullchk); 184 184 185 185 #define rtw89_mgnt_chan_get(rtwdev, link_index) \ 186 - __rtw89_mgnt_chan_get(rtwdev, __func__, link_index) 186 + __rtw89_mgnt_chan_get(rtwdev, __func__, link_index, false) 187 + 188 + static inline const struct rtw89_chan * 189 + rtw89_mgnt_chan_get_or_null(struct rtw89_dev *rtwdev, u8 link_index) 190 + { 191 + return __rtw89_mgnt_chan_get(rtwdev, NULL, link_index, true); 192 + } 187 193 188 194 struct rtw89_mcc_links_info { 189 195 struct rtw89_vif_link *links[NUM_OF_RTW89_MCC_ROLES];
+4 -1
drivers/net/wireless/realtek/rtw89/coex.c
··· 93 93 [CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX), 94 94 [CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO), 95 95 [CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX), 96 - [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO), 96 + [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX), 97 97 [CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX), 98 98 [CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO), 99 99 [CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO), ··· 4153 4153 s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); 4154 4154 _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur, 4155 4155 s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype); 4156 + _slot_set_dur(btc, CXST_EBT, dur_2); 4156 4157 break; 4157 4158 case BTC_CXP_OFFE_DEF2: 4158 4159 _slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO); ··· 4163 4162 s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); 4164 4163 _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur, 4165 4164 s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype); 4165 + _slot_set_dur(btc, CXST_EBT, dur_2); 4166 4166 break; 4167 4167 case BTC_CXP_OFFE_2GBWMIXB: 4168 4168 if (a2dp->exist) ··· 4172 4170 _slot_set(btc, CXST_E2G, 5, tbl_w1, SLOT_MIX); 4173 4171 _slot_set_le(btc, CXST_EBT, cpu_to_le16(40), 4174 4172 s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); 4173 + _slot_set_dur(btc, CXST_EBT, dur_2); 4175 4174 break; 4176 4175 case BTC_CXP_OFFE_WL: /* for 4-way */ 4177 4176 _slot_set(btc, CXST_E2G, 5, cxtbl[1], SLOT_MIX);
+642 -44
drivers/net/wireless/realtek/rtw89/core.c
··· 2 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 3 */ 4 4 #include <linux/ip.h> 5 + #include <linux/sort.h> 5 6 #include <linux/udp.h> 6 7 7 8 #include "cam.h" ··· 273 272 return NULL; 274 273 } 275 274 276 - bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate) 275 + bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate) 277 276 { 278 - struct ieee80211_rate rate; 277 + const struct ieee80211_rate *rate; 279 278 280 - if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) { 281 - rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate); 279 + if (unlikely(legacy_rate >= ARRAY_SIZE(rtw89_bitrates))) { 280 + rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 281 + "invalid legacy rate %d\n", legacy_rate); 282 282 return false; 283 283 } 284 284 285 - rate = rtw89_bitrates[rpt_rate]; 286 - *bitrate = rate.bitrate; 285 + rate = &rtw89_bitrates[legacy_rate]; 286 + *bitrate = rate->bitrate; 287 287 288 288 return true; 289 289 } ··· 699 697 desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */ 700 698 } 701 699 700 + u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel) 701 + { 702 + switch (qsel) { 703 + default: 704 + rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel); 705 + fallthrough; 706 + case RTW89_TX_QSEL_BE_0: 707 + case RTW89_TX_QSEL_BE_1: 708 + case RTW89_TX_QSEL_BE_2: 709 + case RTW89_TX_QSEL_BE_3: 710 + return RTW89_TXCH_ACH0; 711 + case RTW89_TX_QSEL_BK_0: 712 + case RTW89_TX_QSEL_BK_1: 713 + case RTW89_TX_QSEL_BK_2: 714 + case RTW89_TX_QSEL_BK_3: 715 + return RTW89_TXCH_ACH1; 716 + case RTW89_TX_QSEL_VI_0: 717 + case RTW89_TX_QSEL_VI_1: 718 + case RTW89_TX_QSEL_VI_2: 719 + case RTW89_TX_QSEL_VI_3: 720 + return RTW89_TXCH_ACH2; 721 + case RTW89_TX_QSEL_VO_0: 722 + case RTW89_TX_QSEL_VO_1: 723 + case RTW89_TX_QSEL_VO_2: 724 + case RTW89_TX_QSEL_VO_3: 725 + return RTW89_TXCH_ACH3; 726 + case RTW89_TX_QSEL_B0_MGMT: 727 + return RTW89_TXCH_CH8; 728 + case 
RTW89_TX_QSEL_B0_HI: 729 + return RTW89_TXCH_CH9; 730 + case RTW89_TX_QSEL_B1_MGMT: 731 + return RTW89_TXCH_CH10; 732 + case RTW89_TX_QSEL_B1_HI: 733 + return RTW89_TXCH_CH11; 734 + } 735 + } 736 + EXPORT_SYMBOL(rtw89_core_get_ch_dma); 737 + 738 + u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel) 739 + { 740 + switch (qsel) { 741 + default: 742 + rtw89_warn(rtwdev, "Cannot map qsel to dma v1: %d\n", qsel); 743 + fallthrough; 744 + case RTW89_TX_QSEL_BE_0: 745 + case RTW89_TX_QSEL_BK_0: 746 + return RTW89_TXCH_ACH0; 747 + case RTW89_TX_QSEL_VI_0: 748 + case RTW89_TX_QSEL_VO_0: 749 + return RTW89_TXCH_ACH2; 750 + case RTW89_TX_QSEL_B0_MGMT: 751 + case RTW89_TX_QSEL_B0_HI: 752 + return RTW89_TXCH_CH8; 753 + case RTW89_TX_QSEL_B1_MGMT: 754 + case RTW89_TX_QSEL_B1_HI: 755 + return RTW89_TXCH_CH10; 756 + } 757 + } 758 + EXPORT_SYMBOL(rtw89_core_get_ch_dma_v1); 759 + 702 760 static void 703 761 rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, 704 762 struct rtw89_core_tx_request *tx_req) ··· 772 710 u8 qsel, ch_dma; 773 711 774 712 qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req); 775 - ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 713 + ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 776 714 777 715 desc_info->qsel = qsel; 778 716 desc_info->ch_dma = ch_dma; ··· 989 927 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 990 928 tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid); 991 929 qsel = desc_info->hiq ? 
RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid); 992 - ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 930 + ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 993 931 994 932 desc_info->ch_dma = ch_dma; 995 933 desc_info->tid_indicate = tid_indicate; ··· 1135 1073 } 1136 1074 } 1137 1075 1076 + static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work) 1077 + { 1078 + struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 1079 + tx_wait_work.work); 1080 + 1081 + rtw89_tx_wait_list_clear(rtwdev); 1082 + } 1083 + 1138 1084 void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel) 1139 1085 { 1140 1086 u8 ch_dma; 1141 1087 1142 - ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 1088 + ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 1143 1089 1144 1090 rtw89_hci_tx_kick_off(rtwdev, ch_dma); 1145 1091 } 1146 1092 1147 1093 int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1148 - int qsel, unsigned int timeout) 1094 + struct rtw89_tx_wait_info *wait, int qsel, 1095 + unsigned int timeout) 1149 1096 { 1150 - struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 1151 - struct rtw89_tx_wait_info *wait; 1152 1097 unsigned long time_left; 1153 1098 int ret = 0; 1154 1099 1155 - wait = kzalloc(sizeof(*wait), GFP_KERNEL); 1156 - if (!wait) { 1157 - rtw89_core_tx_kick_off(rtwdev, qsel); 1158 - return 0; 1159 - } 1160 - 1161 - init_completion(&wait->completion); 1162 - rcu_assign_pointer(skb_data->wait, wait); 1100 + lockdep_assert_wiphy(rtwdev->hw->wiphy); 1163 1101 1164 1102 rtw89_core_tx_kick_off(rtwdev, qsel); 1165 1103 time_left = wait_for_completion_timeout(&wait->completion, 1166 1104 msecs_to_jiffies(timeout)); 1167 - if (time_left == 0) 1168 - ret = -ETIMEDOUT; 1169 - else if (!wait->tx_done) 1170 - ret = -EAGAIN; 1171 1105 1172 - rcu_assign_pointer(skb_data->wait, NULL); 1173 - kfree_rcu(wait, rcu_head); 1106 + if (time_left == 0) { 1107 + ret = -ETIMEDOUT; 1108 + list_add_tail(&wait->list, 
&rtwdev->tx_waits); 1109 + wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work, 1110 + RTW89_TX_WAIT_WORK_TIMEOUT); 1111 + } else { 1112 + if (!wait->tx_done) 1113 + ret = -EAGAIN; 1114 + rtw89_tx_wait_release(wait); 1115 + } 1174 1116 1175 1117 return ret; 1176 1118 } ··· 1223 1157 static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev, 1224 1158 struct rtw89_vif_link *rtwvif_link, 1225 1159 struct rtw89_sta_link *rtwsta_link, 1226 - struct sk_buff *skb, int *qsel, bool sw_mld) 1160 + struct sk_buff *skb, int *qsel, bool sw_mld, 1161 + struct rtw89_tx_wait_info *wait) 1227 1162 { 1228 1163 struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link); 1229 1164 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 1165 + struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 1230 1166 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 1231 1167 struct rtw89_core_tx_request tx_req = {}; 1232 1168 int ret; ··· 1244 1176 rtw89_wow_parse_akm(rtwdev, skb); 1245 1177 rtw89_core_tx_update_desc_info(rtwdev, &tx_req); 1246 1178 rtw89_core_tx_wake(rtwdev, &tx_req); 1179 + 1180 + rcu_assign_pointer(skb_data->wait, wait); 1247 1181 1248 1182 ret = rtw89_hci_tx_write(rtwdev, &tx_req); 1249 1183 if (ret) { ··· 1283 1213 } 1284 1214 } 1285 1215 1286 - return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false); 1216 + return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false, 1217 + NULL); 1287 1218 } 1288 1219 1289 1220 static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info) ··· 1906 1835 1907 1836 tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL); 1908 1837 phy_ppdu->rpl_avg = tmp_rpl >> 1; 1838 + 1839 + if (!phy_ppdu->hdr_2_en) 1840 + phy_ppdu->rx_path_en = 1841 + le32_get_bits(ie->w3, RTW89_PHY_STS_IE00_W3_RX_PATH_EN); 1909 1842 } 1910 1843 1911 1844 static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev, ··· 2296 2221 
WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp)); 2297 2222 } 2298 2223 2224 + static u32 rtw89_bcn_calc_min_tbtt(struct rtw89_dev *rtwdev, u32 tbtt1, u32 tbtt2) 2225 + { 2226 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2227 + u32 close_bcn_intvl_th = bcn_track->close_bcn_intvl_th; 2228 + u32 tbtt_diff_th = bcn_track->tbtt_diff_th; 2229 + 2230 + if (tbtt2 > tbtt1) 2231 + swap(tbtt1, tbtt2); 2232 + 2233 + if (tbtt1 - tbtt2 > tbtt_diff_th) 2234 + return tbtt1; 2235 + else if (tbtt2 > close_bcn_intvl_th) 2236 + return tbtt2; 2237 + else if (tbtt1 > close_bcn_intvl_th) 2238 + return tbtt1; 2239 + else 2240 + return tbtt2; 2241 + } 2242 + 2243 + static void rtw89_bcn_cfg_tbtt_offset(struct rtw89_dev *rtwdev, 2244 + struct rtw89_vif_link *rtwvif_link) 2245 + { 2246 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2247 + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2248 + u32 offset = bcn_track->tbtt_offset; 2249 + 2250 + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2251 + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 2252 + const struct rtw89_port_reg *p = mac->port_base; 2253 + u32 bcnspc, val; 2254 + 2255 + bcnspc = rtw89_read32_port_mask(rtwdev, rtwvif_link, 2256 + p->bcn_space, B_AX_BCN_SPACE_MASK); 2257 + val = bcnspc - (offset / 1024); 2258 + val = u32_encode_bits(val, B_AX_TBTT_SHIFT_OFST_MAG) | 2259 + B_AX_TBTT_SHIFT_OFST_SIGN; 2260 + 2261 + rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift, 2262 + B_AX_TBTT_SHIFT_OFST_MASK, val); 2263 + 2264 + return; 2265 + } 2266 + 2267 + rtw89_fw_h2c_tbtt_tuning(rtwdev, rtwvif_link, offset); 2268 + } 2269 + 2270 + static void rtw89_bcn_update_tbtt_offset(struct rtw89_dev *rtwdev, 2271 + struct rtw89_vif_link *rtwvif_link) 2272 + { 2273 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2274 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2275 + u32 *tbtt_us = bcn_stat->tbtt_us; 
2276 + u32 offset = tbtt_us[0]; 2277 + u8 i; 2278 + 2279 + for (i = 1; i < RTW89_BCN_TRACK_STAT_NR; i++) 2280 + offset = rtw89_bcn_calc_min_tbtt(rtwdev, tbtt_us[i], offset); 2281 + 2282 + if (bcn_track->tbtt_offset == offset) 2283 + return; 2284 + 2285 + bcn_track->tbtt_offset = offset; 2286 + rtw89_bcn_cfg_tbtt_offset(rtwdev, rtwvif_link); 2287 + } 2288 + 2289 + static int cmp_u16(const void *a, const void *b) 2290 + { 2291 + return *(const u16 *)a - *(const u16 *)b; 2292 + } 2293 + 2294 + static u16 _rtw89_bcn_calc_drift(u16 tbtt, u16 offset, u16 beacon_int) 2295 + { 2296 + if (tbtt < offset) 2297 + return beacon_int - offset + tbtt; 2298 + 2299 + return tbtt - offset; 2300 + } 2301 + 2302 + static void rtw89_bcn_calc_drift(struct rtw89_dev *rtwdev) 2303 + { 2304 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2305 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2306 + u16 offset_tu = bcn_track->tbtt_offset / 1024; 2307 + u16 *tbtt_tu = bcn_stat->tbtt_tu; 2308 + u16 *drift = bcn_stat->drift; 2309 + u8 i; 2310 + 2311 + bcn_stat->tbtt_tu_min = U16_MAX; 2312 + bcn_stat->tbtt_tu_max = 0; 2313 + for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) { 2314 + drift[i] = _rtw89_bcn_calc_drift(tbtt_tu[i], offset_tu, 2315 + bcn_track->beacon_int); 2316 + 2317 + bcn_stat->tbtt_tu_min = min(bcn_stat->tbtt_tu_min, tbtt_tu[i]); 2318 + bcn_stat->tbtt_tu_max = max(bcn_stat->tbtt_tu_max, tbtt_tu[i]); 2319 + } 2320 + 2321 + sort(drift, RTW89_BCN_TRACK_STAT_NR, sizeof(*drift), cmp_u16, NULL); 2322 + } 2323 + 2324 + static void rtw89_bcn_calc_distribution(struct rtw89_dev *rtwdev) 2325 + { 2326 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2327 + struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist; 2328 + u16 lower_bound, upper_bound, outlier_count = 0; 2329 + u16 *drift = bcn_stat->drift; 2330 + u16 *bins = bcn_dist->bins; 2331 + u16 q1, q3, iqr, tmp; 2332 + u8 i; 2333 + 2334 + BUILD_BUG_ON(RTW89_BCN_TRACK_STAT_NR % 4 != 0); 2335 
+ 2336 + memset(bcn_dist, 0, sizeof(*bcn_dist)); 2337 + 2338 + bcn_dist->min = drift[0]; 2339 + bcn_dist->max = drift[RTW89_BCN_TRACK_STAT_NR - 1]; 2340 + 2341 + tmp = RTW89_BCN_TRACK_STAT_NR / 4; 2342 + q1 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2; 2343 + 2344 + tmp = (RTW89_BCN_TRACK_STAT_NR * 3) / 4; 2345 + q3 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2; 2346 + 2347 + iqr = q3 - q1; 2348 + tmp = (3 * iqr) / 2; 2349 + 2350 + if (bcn_dist->min <= 5) 2351 + lower_bound = bcn_dist->min; 2352 + else if (q1 > tmp) 2353 + lower_bound = (q1 - tmp) / RTW89_BCN_TRACK_SCALE_FACTOR; 2354 + else 2355 + lower_bound = 0; 2356 + 2357 + upper_bound = (q3 + tmp) / RTW89_BCN_TRACK_SCALE_FACTOR; 2358 + 2359 + for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) { 2360 + u16 tbtt = bcn_stat->tbtt_tu[i]; 2361 + u16 min = bcn_stat->tbtt_tu_min; 2362 + u8 bin_idx; 2363 + 2364 + /* histogram */ 2365 + bin_idx = min((tbtt - min) / RTW89_BCN_TRACK_BIN_WIDTH, 2366 + RTW89_BCN_TRACK_MAX_BIN_NUM - 1); 2367 + bins[bin_idx]++; 2368 + 2369 + /* boxplot outlier */ 2370 + if (drift[i] < lower_bound || drift[i] > upper_bound) 2371 + outlier_count++; 2372 + } 2373 + 2374 + bcn_dist->outlier_count = outlier_count; 2375 + bcn_dist->lower_bound = lower_bound; 2376 + bcn_dist->upper_bound = upper_bound; 2377 + } 2378 + 2379 + static u8 rtw89_bcn_get_coverage(struct rtw89_dev *rtwdev, u16 threshold) 2380 + { 2381 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2382 + int l = 0, r = RTW89_BCN_TRACK_STAT_NR - 1, m; 2383 + u16 *drift = bcn_stat->drift; 2384 + int index = -1; 2385 + u8 count = 0; 2386 + 2387 + while (l <= r) { 2388 + m = l + (r - l) / 2; 2389 + 2390 + if (drift[m] <= threshold) { 2391 + index = m; 2392 + l = m + 1; 2393 + } else { 2394 + r = m - 1; 2395 + } 2396 + } 2397 + 2398 + count = (index == -1) ? 
0 : (index + 1); 2399 + 2400 + return (count * PERCENT) / RTW89_BCN_TRACK_STAT_NR; 2401 + } 2402 + 2403 + static u16 rtw89_bcn_get_histogram_bound(struct rtw89_dev *rtwdev, u8 target) 2404 + { 2405 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2406 + struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist; 2407 + u16 tbtt_tu_max = bcn_stat->tbtt_tu_max; 2408 + u16 upper, lower = bcn_stat->tbtt_tu_min; 2409 + u8 i, count = 0; 2410 + 2411 + for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) { 2412 + upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1; 2413 + if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1) 2414 + upper = max(upper, tbtt_tu_max); 2415 + 2416 + count += bcn_dist->bins[i]; 2417 + if (count > target) 2418 + break; 2419 + 2420 + lower = upper + 1; 2421 + } 2422 + 2423 + return upper; 2424 + } 2425 + 2426 + static u16 rtw89_bcn_get_rx_time(struct rtw89_dev *rtwdev, 2427 + const struct rtw89_chan *chan) 2428 + { 2429 + #define RTW89_SYMBOL_TIME_2GHZ 192 2430 + #define RTW89_SYMBOL_TIME_5GHZ 20 2431 + #define RTW89_SYMBOL_TIME_6GHZ 20 2432 + struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2433 + u16 bitrate, val; 2434 + 2435 + if (!rtw89_legacy_rate_to_bitrate(rtwdev, pkt_stat->beacon_rate, &bitrate)) 2436 + return 0; 2437 + 2438 + val = (pkt_stat->beacon_len * 8 * RTW89_BCN_TRACK_SCALE_FACTOR) / bitrate; 2439 + 2440 + switch (chan->band_type) { 2441 + default: 2442 + case RTW89_BAND_2G: 2443 + val += RTW89_SYMBOL_TIME_2GHZ; 2444 + break; 2445 + case RTW89_BAND_5G: 2446 + val += RTW89_SYMBOL_TIME_5GHZ; 2447 + break; 2448 + case RTW89_BAND_6G: 2449 + val += RTW89_SYMBOL_TIME_6GHZ; 2450 + break; 2451 + } 2452 + 2453 + /* convert to millisecond */ 2454 + return DIV_ROUND_UP(val, 1000); 2455 + } 2456 + 2457 + static void rtw89_bcn_calc_timeout(struct rtw89_dev *rtwdev, 2458 + struct rtw89_vif_link *rtwvif_link) 2459 + { 2460 + #define RTW89_BCN_TRACK_EXTEND_TIMEOUT 5 2461 + #define RTW89_BCN_TRACK_COVERAGE_TH 0 /* unit: TU */ 2462 + 
#define RTW89_BCN_TRACK_STRONG_RSSI 80 2463 + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2464 + struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2465 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2466 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2467 + struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist; 2468 + u16 outlier_high_bcn_th = bcn_track->outlier_high_bcn_th; 2469 + u16 outlier_low_bcn_th = bcn_track->outlier_low_bcn_th; 2470 + u8 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi); 2471 + u16 target_bcn_th = bcn_track->target_bcn_th; 2472 + u16 low_bcn_th = bcn_track->low_bcn_th; 2473 + u16 med_bcn_th = bcn_track->med_bcn_th; 2474 + u16 beacon_int = bcn_track->beacon_int; 2475 + u16 bcn_timeout; 2476 + 2477 + if (pkt_stat->beacon_nr < low_bcn_th) { 2478 + bcn_timeout = (RTW89_BCN_TRACK_TARGET_BCN * beacon_int) / PERCENT; 2479 + goto out; 2480 + } 2481 + 2482 + if (bcn_dist->outlier_count >= outlier_high_bcn_th) { 2483 + bcn_timeout = bcn_dist->max; 2484 + goto out; 2485 + } 2486 + 2487 + if (pkt_stat->beacon_nr < med_bcn_th) { 2488 + if (bcn_dist->outlier_count > outlier_low_bcn_th) 2489 + bcn_timeout = (bcn_dist->max + bcn_dist->upper_bound) / 2; 2490 + else 2491 + bcn_timeout = bcn_dist->upper_bound + 2492 + RTW89_BCN_TRACK_EXTEND_TIMEOUT; 2493 + 2494 + goto out; 2495 + } 2496 + 2497 + if (rssi >= RTW89_BCN_TRACK_STRONG_RSSI) { 2498 + if (rtw89_bcn_get_coverage(rtwdev, RTW89_BCN_TRACK_COVERAGE_TH) >= 90) { 2499 + /* ideal case */ 2500 + bcn_timeout = 0; 2501 + } else { 2502 + u16 offset_tu = bcn_track->tbtt_offset / 1024; 2503 + u16 upper_bound; 2504 + 2505 + upper_bound = 2506 + rtw89_bcn_get_histogram_bound(rtwdev, target_bcn_th); 2507 + bcn_timeout = 2508 + _rtw89_bcn_calc_drift(upper_bound, offset_tu, beacon_int); 2509 + } 2510 + 2511 + goto out; 2512 + } 2513 + 2514 + bcn_timeout = bcn_stat->drift[target_bcn_th]; 2515 + 2516 + out: 2517 + 
bcn_track->bcn_timeout = bcn_timeout + rtw89_bcn_get_rx_time(rtwdev, chan); 2518 + } 2519 + 2520 + static void rtw89_bcn_update_timeout(struct rtw89_dev *rtwdev, 2521 + struct rtw89_vif_link *rtwvif_link) 2522 + { 2523 + rtw89_bcn_calc_drift(rtwdev); 2524 + rtw89_bcn_calc_distribution(rtwdev); 2525 + rtw89_bcn_calc_timeout(rtwdev, rtwvif_link); 2526 + } 2527 + 2528 + static void rtw89_core_bcn_track(struct rtw89_dev *rtwdev) 2529 + { 2530 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2531 + struct rtw89_vif_link *rtwvif_link; 2532 + struct rtw89_vif *rtwvif; 2533 + unsigned int link_id; 2534 + 2535 + if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw)) 2536 + return; 2537 + 2538 + if (!rtwdev->lps_enabled) 2539 + return; 2540 + 2541 + if (!bcn_track->is_data_ready) 2542 + return; 2543 + 2544 + rtw89_for_each_rtwvif(rtwdev, rtwvif) { 2545 + rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2546 + if (!(rtwvif_link->wifi_role == RTW89_WIFI_ROLE_STATION || 2547 + rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)) 2548 + continue; 2549 + 2550 + rtw89_bcn_update_tbtt_offset(rtwdev, rtwvif_link); 2551 + rtw89_bcn_update_timeout(rtwdev, rtwvif_link); 2552 + } 2553 + } 2554 + } 2555 + 2556 + static bool rtw89_core_bcn_track_can_lps(struct rtw89_dev *rtwdev) 2557 + { 2558 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2559 + 2560 + if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw)) 2561 + return true; 2562 + 2563 + return bcn_track->is_data_ready; 2564 + } 2565 + 2566 + static void rtw89_core_bcn_track_assoc(struct rtw89_dev *rtwdev, 2567 + struct rtw89_vif_link *rtwvif_link) 2568 + { 2569 + #define RTW89_BCN_TRACK_MED_BCN 70 2570 + #define RTW89_BCN_TRACK_LOW_BCN 30 2571 + #define RTW89_BCN_TRACK_OUTLIER_HIGH_BCN 30 2572 + #define RTW89_BCN_TRACK_OUTLIER_LOW_BCN 20 2573 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 2574 + u32 period = jiffies_to_msecs(RTW89_TRACK_WORK_PERIOD); 2575 + struct 
ieee80211_bss_conf *bss_conf; 2576 + u32 beacons_in_period; 2577 + u32 bcn_intvl_us; 2578 + u16 beacon_int; 2579 + u8 dtim; 2580 + 2581 + rcu_read_lock(); 2582 + bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2583 + beacon_int = bss_conf->beacon_int; 2584 + dtim = bss_conf->dtim_period; 2585 + rcu_read_unlock(); 2586 + 2587 + beacons_in_period = period / beacon_int / dtim; 2588 + bcn_intvl_us = ieee80211_tu_to_usec(beacon_int); 2589 + 2590 + bcn_track->low_bcn_th = 2591 + (beacons_in_period * RTW89_BCN_TRACK_LOW_BCN) / PERCENT; 2592 + bcn_track->med_bcn_th = 2593 + (beacons_in_period * RTW89_BCN_TRACK_MED_BCN) / PERCENT; 2594 + bcn_track->outlier_low_bcn_th = 2595 + (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_LOW_BCN) / PERCENT; 2596 + bcn_track->outlier_high_bcn_th = 2597 + (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_HIGH_BCN) / PERCENT; 2598 + bcn_track->target_bcn_th = 2599 + (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_TARGET_BCN) / PERCENT; 2600 + 2601 + bcn_track->close_bcn_intvl_th = ieee80211_tu_to_usec(beacon_int - 3); 2602 + bcn_track->tbtt_diff_th = (bcn_intvl_us * 85) / PERCENT; 2603 + bcn_track->beacon_int = beacon_int; 2604 + bcn_track->dtim = dtim; 2605 + } 2606 + 2607 + static void rtw89_core_bcn_track_reset(struct rtw89_dev *rtwdev) 2608 + { 2609 + memset(&rtwdev->phystat.bcn_stat, 0, sizeof(rtwdev->phystat.bcn_stat)); 2610 + memset(&rtwdev->bcn_track, 0, sizeof(rtwdev->bcn_track)); 2611 + } 2612 + 2613 + static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev, 2614 + struct ieee80211_bss_conf *bss_conf, 2615 + struct sk_buff *skb) 2616 + { 2617 + #define RTW89_APPEND_TSF_2GHZ 384 2618 + #define RTW89_APPEND_TSF_5GHZ 52 2619 + #define RTW89_APPEND_TSF_6GHZ 52 2620 + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 2621 + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); 2622 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 2623 + struct rtw89_beacon_track_info *bcn_track 
= &rtwdev->bcn_track; 2624 + u32 bcn_intvl_us = ieee80211_tu_to_usec(bss_conf->beacon_int); 2625 + u64 tsf = le64_to_cpu(mgmt->u.beacon.timestamp); 2626 + u8 wp, num = bcn_stat->num; 2627 + u16 append; 2628 + 2629 + if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw)) 2630 + return; 2631 + 2632 + switch (rx_status->band) { 2633 + default: 2634 + case NL80211_BAND_2GHZ: 2635 + append = RTW89_APPEND_TSF_2GHZ; 2636 + break; 2637 + case NL80211_BAND_5GHZ: 2638 + append = RTW89_APPEND_TSF_5GHZ; 2639 + break; 2640 + case NL80211_BAND_6GHZ: 2641 + append = RTW89_APPEND_TSF_6GHZ; 2642 + break; 2643 + } 2644 + 2645 + wp = bcn_stat->wp; 2646 + div_u64_rem(tsf - append, bcn_intvl_us, &bcn_stat->tbtt_us[wp]); 2647 + bcn_stat->tbtt_tu[wp] = bcn_stat->tbtt_us[wp] / 1024; 2648 + bcn_stat->wp = (wp + 1) % RTW89_BCN_TRACK_STAT_NR; 2649 + bcn_stat->num = umin(num + 1, RTW89_BCN_TRACK_STAT_NR); 2650 + bcn_track->is_data_ready = bcn_stat->num == RTW89_BCN_TRACK_STAT_NR; 2651 + } 2652 + 2299 2653 static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, 2300 2654 struct ieee80211_vif *vif) 2301 2655 { ··· 2741 2237 struct ieee80211_bss_conf *bss_conf; 2742 2238 struct rtw89_vif_link *rtwvif_link; 2743 2239 const u8 *bssid = iter_data->bssid; 2240 + const u8 *target_bssid; 2744 2241 2745 2242 if (rtwdev->scanning && 2746 2243 (ieee80211_is_beacon(hdr->frame_control) || ··· 2763 2258 goto out; 2764 2259 } 2765 2260 2766 - if (!ether_addr_equal(bss_conf->bssid, bssid)) 2261 + target_bssid = ieee80211_is_beacon(hdr->frame_control) && 2262 + bss_conf->nontransmitted ? 
2263 + bss_conf->transmitter_bssid : bss_conf->bssid; 2264 + if (!ether_addr_equal(target_bssid, bssid)) 2767 2265 goto out; 2768 2266 2769 2267 if (is_mld) { ··· 2780 2272 rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len); 2781 2273 rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu); 2782 2274 } 2783 - pkt_stat->beacon_nr++; 2784 2275 2785 2276 if (phy_ppdu) { 2786 2277 ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg); ··· 2787 2280 rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx; 2788 2281 } 2789 2282 2283 + pkt_stat->beacon_nr++; 2790 2284 pkt_stat->beacon_rate = desc_info->data_rate; 2285 + pkt_stat->beacon_len = skb->len; 2286 + 2287 + rtw89_vif_rx_bcn_stat(rtwdev, bss_conf, skb); 2791 2288 } 2792 2289 2793 2290 if (!ether_addr_equal(bss_conf->addr, hdr->addr1)) ··· 3737 3226 u8 qsel, ch_dma; 3738 3227 3739 3228 qsel = rtw89_core_get_qsel(rtwdev, tid); 3740 - ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel); 3229 + ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel); 3741 3230 3742 3231 return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma); 3743 3232 } ··· 3922 3411 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3923 3412 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 3924 3413 struct rtw89_sta_link *rtwsta_link; 3414 + struct rtw89_tx_wait_info *wait; 3925 3415 struct ieee80211_sta *sta; 3926 3416 struct ieee80211_hdr *hdr; 3927 3417 struct rtw89_sta *rtwsta; ··· 3931 3419 3932 3420 if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) 3933 3421 return 0; 3422 + 3423 + wait = kzalloc(sizeof(*wait), GFP_KERNEL); 3424 + if (!wait) 3425 + return -ENOMEM; 3426 + 3427 + init_completion(&wait->completion); 3934 3428 3935 3429 rcu_read_lock(); 3936 3430 sta = ieee80211_find_sta(vif, vif->cfg.ap_addr); ··· 3952 3434 goto out; 3953 3435 } 3954 3436 3437 + wait->skb = skb; 3438 + 3955 3439 hdr = (struct ieee80211_hdr *)skb->data; 3956 3440 if (ps) 3957 3441 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); ··· 3961 3441 rtwsta_link = rtwsta->links[rtwvif_link->link_id]; 3962 3442 if (unlikely(!rtwsta_link)) { 3963 3443 ret = -ENOLINK; 3444 + dev_kfree_skb_any(skb); 3964 3445 goto out; 3965 3446 } 3966 3447 3967 - ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true); 3448 + ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true, 3449 + wait); 3968 3450 if (ret) { 3969 3451 rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret); 3970 3452 dev_kfree_skb_any(skb); ··· 3975 3453 3976 3454 rcu_read_unlock(); 3977 3455 3978 - return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel, 3456 + return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel, 3979 3457 timeout); 3980 3458 out: 3981 3459 rcu_read_unlock(); 3460 + kfree(wait); 3982 3461 3983 3462 return ret; 3984 3463 } ··· 4220 3697 vif->type == NL80211_IFTYPE_P2P_CLIENT)) 4221 3698 continue; 4222 3699 3700 + if (!rtw89_core_bcn_track_can_lps(rtwdev)) 3701 + continue; 3702 + 4223 3703 rtw89_enter_lps(rtwdev, rtwvif, true); 4224 3704 } 4225 3705 } ··· 4409 3883 rtw89_btc_ntfy_wl_sta(rtwdev); 4410 3884 } 4411 3885 rtw89_mac_bf_monitor_track(rtwdev); 3886 + rtw89_core_bcn_track(rtwdev); 4412 
3887 rtw89_phy_stat_track(rtwdev); 4413 3888 rtw89_phy_env_monitor_track(rtwdev); 4414 3889 rtw89_phy_dig(rtwdev); ··· 4656 4129 4657 4130 rtw89_assoc_link_clr(rtwsta_link); 4658 4131 4659 - if (vif->type == NL80211_IFTYPE_STATION) 4132 + if (vif->type == NL80211_IFTYPE_STATION) { 4660 4133 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false); 4134 + rtw89_core_bcn_track_reset(rtwdev); 4135 + } 4661 4136 4662 4137 if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 4663 4138 rtw89_p2p_noa_once_deinit(rtwvif_link); ··· 4800 4271 BTC_ROLE_MSTS_STA_CONN_END); 4801 4272 rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan); 4802 4273 rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link); 4274 + rtw89_core_bcn_track_assoc(rtwdev, rtwvif_link); 4803 4275 4804 4276 ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id); 4805 4277 if (ret) { ··· 5359 4829 } 5360 4830 } 5361 4831 5362 - int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond) 4832 + struct rtw89_wait_response * 4833 + rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond) 5363 4834 { 5364 - struct completion *cmpl = &wait->completion; 5365 - unsigned long time_left; 4835 + struct rtw89_wait_response *prep; 5366 4836 unsigned int cur; 4837 + 4838 + /* use -EPERM _iff_ telling eval side not to make any changes */ 5367 4839 5368 4840 cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond); 5369 4841 if (cur != RTW89_WAIT_COND_IDLE) 5370 - return -EBUSY; 4842 + return ERR_PTR(-EPERM); 5371 4843 5372 - time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT); 5373 - if (time_left == 0) { 5374 - atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE); 5375 - return -ETIMEDOUT; 4844 + prep = kzalloc(sizeof(*prep), GFP_KERNEL); 4845 + if (!prep) 4846 + return ERR_PTR(-ENOMEM); 4847 + 4848 + init_completion(&prep->completion); 4849 + 4850 + rcu_assign_pointer(wait->resp, prep); 4851 + 4852 + return prep; 4853 + } 4854 + 4855 + int 
rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait, 4856 + struct rtw89_wait_response *prep, int err) 4857 + { 4858 + unsigned long time_left; 4859 + 4860 + if (IS_ERR(prep)) { 4861 + err = err ?: PTR_ERR(prep); 4862 + 4863 + /* special error case: no permission to reset anything */ 4864 + if (PTR_ERR(prep) == -EPERM) 4865 + return err; 4866 + 4867 + goto reset; 5376 4868 } 4869 + 4870 + if (err) 4871 + goto cleanup; 4872 + 4873 + time_left = wait_for_completion_timeout(&prep->completion, 4874 + RTW89_WAIT_FOR_COND_TIMEOUT); 4875 + if (time_left == 0) { 4876 + err = -ETIMEDOUT; 4877 + goto cleanup; 4878 + } 4879 + 4880 + wait->data = prep->data; 4881 + 4882 + cleanup: 4883 + rcu_assign_pointer(wait->resp, NULL); 4884 + kfree_rcu(prep, rcu_head); 4885 + 4886 + reset: 4887 + atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE); 4888 + 4889 + if (err) 4890 + return err; 5377 4891 5378 4892 if (wait->data.err) 5379 4893 return -EFAULT; ··· 5425 4851 return 0; 5426 4852 } 5427 4853 4854 + static void rtw89_complete_cond_resp(struct rtw89_wait_response *resp, 4855 + const struct rtw89_completion_data *data) 4856 + { 4857 + resp->data = *data; 4858 + complete(&resp->completion); 4859 + } 4860 + 5428 4861 void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond, 5429 4862 const struct rtw89_completion_data *data) 5430 4863 { 4864 + struct rtw89_wait_response *resp; 5431 4865 unsigned int cur; 4866 + 4867 + guard(rcu)(); 4868 + 4869 + resp = rcu_dereference(wait->resp); 4870 + if (!resp) 4871 + return; 5432 4872 5433 4873 cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE); 5434 4874 if (cur != cond) 5435 4875 return; 5436 4876 5437 - wait->data = *data; 5438 - complete(&wait->completion); 4877 + rtw89_complete_cond_resp(resp, data); 5439 4878 } 5440 4879 5441 4880 void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event) ··· 5495 4908 { 5496 4909 int ret; 5497 4910 4911 + rtw89_phy_init_bb_afe(rtwdev); 4912 + 5498 4913 ret = 
rtw89_mac_init(rtwdev); 5499 4914 if (ret) { 5500 4915 rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret); ··· 5541 4952 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON); 5542 4953 rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable); 5543 4954 rtw89_fw_h2c_init_ba_cam(rtwdev); 4955 + rtw89_tas_fw_timer_enable(rtwdev, true); 5544 4956 5545 4957 return 0; 5546 4958 } ··· 5557 4967 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 5558 4968 return; 5559 4969 4970 + rtw89_tas_fw_timer_enable(rtwdev, false); 5560 4971 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF); 5561 4972 5562 4973 clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags); ··· 5569 4978 wiphy_work_cancel(wiphy, &btc->dhcp_notify_work); 5570 4979 wiphy_work_cancel(wiphy, &btc->icmp_notify_work); 5571 4980 cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work); 4981 + wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work); 5572 4982 wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work); 5573 4983 wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work); 5574 4984 wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work); ··· 5795 5203 INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]); 5796 5204 } 5797 5205 INIT_LIST_HEAD(&rtwdev->scan_info.chan_list); 5206 + INIT_LIST_HEAD(&rtwdev->tx_waits); 5798 5207 INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work); 5799 5208 INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work); 5800 5209 INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work); ··· 5807 5214 wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work); 5808 5215 wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work); 5809 5216 wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work); 5217 + wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work); 5810 5218 INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work); 5811 5219 wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work); 5812 5220 
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); ··· 6407 5813 return ret; 6408 5814 } 6409 5815 5816 + rtw89_phy_dm_init_data(rtwdev); 6410 5817 rtw89_debugfs_init(rtwdev); 6411 5818 6412 5819 return 0; ··· 6457 5862 ops->remain_on_channel = NULL; 6458 5863 ops->cancel_remain_on_channel = NULL; 6459 5864 } 5865 + 5866 + if (!chip->support_noise) 5867 + ops->get_survey = NULL; 6460 5868 6461 5869 driver_data_size = sizeof(struct rtw89_dev) + bus_data_size; 6462 5870 hw = ieee80211_alloc_hw(driver_data_size, ops);
+138 -12
drivers/net/wireless/realtek/rtw89/core.h
··· 1011 1011 u32 ptcl_dbg; 1012 1012 u32 ptcl_dbg_info; 1013 1013 u32 bcn_drop_all; 1014 + u32 bcn_psr_rpt; 1014 1015 u32 hiq_win[RTW89_PORT_NUM]; 1015 1016 }; 1016 1017 ··· 3507 3506 bool enable; 3508 3507 }; 3509 3508 3509 + #define RTW89_TX_WAIT_WORK_TIMEOUT msecs_to_jiffies(500) 3510 3510 struct rtw89_tx_wait_info { 3511 3511 struct rcu_head rcu_head; 3512 + struct list_head list; 3512 3513 struct completion completion; 3514 + struct sk_buff *skb; 3513 3515 bool tx_done; 3514 3516 }; 3515 3517 ··· 3763 3759 void (*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev, 3764 3760 struct rtw89_tx_desc_info *desc_info, 3765 3761 void *txdesc); 3762 + u8 (*get_ch_dma)(struct rtw89_dev *rtwdev, u8 qsel); 3766 3763 int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl); 3767 3764 int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev, 3768 3765 const struct rtw89_mac_ax_coex_gnt *gnt_cfg); ··· 4368 4363 (struct rtw89_dev *rtwdev, enum rtw89_chanctx_state state); 4369 4364 }; 4370 4365 4366 + #define RTW89_NHM_TH_NUM 11 4367 + #define RTW89_NHM_RPT_NUM 12 4368 + 4371 4369 struct rtw89_chip_info { 4372 4370 enum rtw89_core_chip_id chip_id; 4373 4371 enum rtw89_chip_gen chip_gen; ··· 4405 4397 bool support_ant_gain; 4406 4398 bool support_tas; 4407 4399 bool support_sar_by_ant; 4400 + bool support_noise; 4408 4401 bool ul_tb_waveform_ctrl; 4409 4402 bool ul_tb_pwr_diff; 4410 4403 bool rx_freq_frome_ie; ··· 4490 4481 bool cfo_hw_comp; 4491 4482 const struct rtw89_reg_def *dcfo_comp; 4492 4483 u8 dcfo_comp_sft; 4484 + const struct rtw89_reg_def (*nhm_report)[RTW89_NHM_RPT_NUM]; 4485 + const struct rtw89_reg_def (*nhm_th)[RTW89_NHM_TH_NUM]; 4493 4486 const struct rtw89_imr_info *imr_info; 4494 4487 const struct rtw89_imr_table *imr_dmac_table; 4495 4488 const struct rtw89_imr_table *imr_cmac_table; ··· 4553 4542 u8 buf[RTW89_COMPLETION_BUF_SIZE]; 4554 4543 }; 4555 4544 4556 - struct rtw89_wait_info { 4557 - atomic_t cond; 4545 + struct rtw89_wait_response { 4546 + struct rcu_head rcu_head; 
4558 4547 struct completion completion; 4559 4548 struct rtw89_completion_data data; 4549 + }; 4550 + 4551 + struct rtw89_wait_info { 4552 + atomic_t cond; 4553 + struct rtw89_completion_data data; 4554 + struct rtw89_wait_response __rcu *resp; 4560 4555 }; 4561 4556 4562 4557 #define RTW89_WAIT_FOR_COND_TIMEOUT msecs_to_jiffies(100) 4563 4558 4564 4559 static inline void rtw89_init_wait(struct rtw89_wait_info *wait) 4565 4560 { 4566 - init_completion(&wait->completion); 4561 + rcu_assign_pointer(wait->resp, NULL); 4567 4562 atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE); 4568 4563 } 4569 4564 ··· 4639 4622 RTW89_FW_FEATURE_SCAN_OFFLOAD_EXTRA_OP, 4640 4623 RTW89_FW_FEATURE_RFK_NTFY_MCC_V0, 4641 4624 RTW89_FW_FEATURE_LPS_DACK_BY_C2H_REG, 4625 + RTW89_FW_FEATURE_BEACON_TRACKING, 4642 4626 }; 4643 4627 4644 4628 struct rtw89_fw_suit { ··· 4699 4681 struct rtw89_fw_txpwr_track_cfg *txpwr_trk; 4700 4682 struct rtw89_phy_rfk_log_fmt *rfk_log_fmt; 4701 4683 const struct rtw89_regd_data *regd; 4684 + const struct rtw89_fw_element_hdr *afe; 4702 4685 }; 4703 4686 4704 4687 enum rtw89_fw_mss_dev_type { ··· 5093 5074 struct rtw89_pkt_stat { 5094 5075 u16 beacon_nr; 5095 5076 u8 beacon_rate; 5077 + u32 beacon_len; 5096 5078 u32 rx_rate_cnt[RTW89_HW_RATE_NR]; 5079 + }; 5080 + 5081 + #define RTW89_BCN_TRACK_STAT_NR 32 5082 + #define RTW89_BCN_TRACK_SCALE_FACTOR 10 5083 + #define RTW89_BCN_TRACK_MAX_BIN_NUM 6 5084 + #define RTW89_BCN_TRACK_BIN_WIDTH 5 5085 + #define RTW89_BCN_TRACK_TARGET_BCN 80 5086 + 5087 + struct rtw89_beacon_dist { 5088 + u16 min; 5089 + u16 max; 5090 + u16 outlier_count; 5091 + u16 lower_bound; 5092 + u16 upper_bound; 5093 + u16 bins[RTW89_BCN_TRACK_MAX_BIN_NUM]; 5094 + }; 5095 + 5096 + struct rtw89_beacon_stat { 5097 + u8 num; 5098 + u8 wp; 5099 + u16 tbtt_tu_min; 5100 + u16 tbtt_tu_max; 5101 + u16 drift[RTW89_BCN_TRACK_STAT_NR]; 5102 + u32 tbtt_us[RTW89_BCN_TRACK_STAT_NR]; 5103 + u16 tbtt_tu[RTW89_BCN_TRACK_STAT_NR]; 5104 + struct rtw89_beacon_dist 
bcn_dist; 5097 5105 }; 5098 5106 5099 5107 DECLARE_EWMA(thermal, 4, 4); ··· 5131 5085 struct ewma_rssi bcn_rssi; 5132 5086 struct rtw89_pkt_stat cur_pkt_stat; 5133 5087 struct rtw89_pkt_stat last_pkt_stat; 5088 + struct rtw89_beacon_stat bcn_stat; 5134 5089 }; 5135 5090 5136 5091 enum rtw89_rfk_report_state { ··· 5481 5434 struct rtw89_ccx_para_info { 5482 5435 enum rtw89_env_racing_lv rac_lv; 5483 5436 u16 mntr_time; 5437 + bool nhm_incld_cca; 5484 5438 u8 nhm_manual_th_ofst; 5485 5439 u8 nhm_manual_th0; 5486 5440 enum rtw89_ifs_clm_application ifs_clm_app; ··· 5515 5467 RTW89_CCX_EDCCA_BW20_7 = 7 5516 5468 }; 5517 5469 5518 - #define RTW89_NHM_TH_NUM 11 5470 + struct rtw89_nhm_report { 5471 + struct list_head list; 5472 + struct ieee80211_channel *channel; 5473 + u8 noise; 5474 + }; 5475 + 5519 5476 #define RTW89_FAHM_TH_NUM 11 5520 - #define RTW89_NHM_RPT_NUM 12 5521 5477 #define RTW89_FAHM_RPT_NUM 12 5522 5478 #define RTW89_IFS_CLM_NUM 4 5523 5479 struct rtw89_env_monitor_info { ··· 5555 5503 u16 ifs_clm_ofdm_fa_permil; 5556 5504 u32 ifs_clm_ifs_avg[RTW89_IFS_CLM_NUM]; 5557 5505 u32 ifs_clm_cca_avg[RTW89_IFS_CLM_NUM]; 5506 + bool nhm_include_cca; 5507 + u32 nhm_sum; 5508 + u32 nhm_mntr_time; 5509 + u16 nhm_result[RTW89_NHM_RPT_NUM]; 5510 + u8 nhm_th[RTW89_NHM_RPT_NUM]; 5511 + struct rtw89_nhm_report *nhm_his[RTW89_BAND_NUM]; 5512 + struct list_head nhm_rpt_list; 5558 5513 }; 5559 5514 5560 5515 enum rtw89_ser_rcvy_step { ··· 5774 5715 u8 kck[32]; 5775 5716 u8 kek[32]; 5776 5717 u8 tk1[16]; 5777 - u8 txmickey[8]; 5778 5718 u8 rxmickey[8]; 5719 + u8 txmickey[8]; 5779 5720 __le32 igtk_keyid; 5780 5721 __le64 ipn; 5781 5722 u8 igtk[2][32]; ··· 5941 5882 struct rtw89_wait_info wait; 5942 5883 }; 5943 5884 5885 + struct rtw89_beacon_track_info { 5886 + bool is_data_ready; 5887 + u32 tbtt_offset; /* in unit of microsecond */ 5888 + u16 bcn_timeout; /* in unit of millisecond */ 5889 + 5890 + /* The following are constant and set at association. 
*/ 5891 + u8 dtim; 5892 + u16 beacon_int; 5893 + u16 low_bcn_th; 5894 + u16 med_bcn_th; 5895 + u16 high_bcn_th; 5896 + u16 target_bcn_th; 5897 + u16 outlier_low_bcn_th; 5898 + u16 outlier_high_bcn_th; 5899 + u32 close_bcn_intvl_th; 5900 + u32 tbtt_diff_th; 5901 + }; 5902 + 5944 5903 struct rtw89_dev { 5945 5904 struct ieee80211_hw *hw; 5946 5905 struct device *dev; ··· 5973 5896 const struct rtw89_pci_info *pci_info; 5974 5897 const struct rtw89_rfe_parms *rfe_parms; 5975 5898 struct rtw89_hal hal; 5899 + struct rtw89_beacon_track_info bcn_track; 5976 5900 struct rtw89_mcc_info mcc; 5977 5901 struct rtw89_mlo_info mlo; 5978 5902 struct rtw89_mac_info mac; ··· 6002 5924 struct work_struct ba_work; 6003 5925 /* used to protect rpwm */ 6004 5926 spinlock_t rpwm_lock; 5927 + 5928 + struct list_head tx_waits; 5929 + struct wiphy_delayed_work tx_wait_work; 6005 5930 6006 5931 struct rtw89_cam_info cam_info; 6007 5932 ··· 6262 6181 list_first_entry_or_null(&p->dlink_pool, typeof(*p->links_inst), dlink_schd); \ 6263 6182 }) 6264 6183 6184 + static inline void rtw89_tx_wait_release(struct rtw89_tx_wait_info *wait) 6185 + { 6186 + dev_kfree_skb_any(wait->skb); 6187 + kfree_rcu(wait, rcu_head); 6188 + } 6189 + 6190 + static inline void rtw89_tx_wait_list_clear(struct rtw89_dev *rtwdev) 6191 + { 6192 + struct rtw89_tx_wait_info *wait, *tmp; 6193 + 6194 + lockdep_assert_wiphy(rtwdev->hw->wiphy); 6195 + 6196 + list_for_each_entry_safe(wait, tmp, &rtwdev->tx_waits, list) { 6197 + if (!completion_done(&wait->completion)) 6198 + continue; 6199 + list_del(&wait->list); 6200 + rtw89_tx_wait_release(wait); 6201 + } 6202 + } 6203 + 6265 6204 static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev, 6266 6205 struct rtw89_core_tx_request *tx_req) 6267 6206 { ··· 6291 6190 static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev) 6292 6191 { 6293 6192 rtwdev->hci.ops->reset(rtwdev); 6193 + rtw89_tx_wait_list_clear(rtwdev); 6294 6194 } 6295 6195 6296 6196 static inline int 
rtw89_hci_start(struct rtw89_dev *rtwdev) ··· 6424 6322 static inline 6425 6323 struct rtw89_tx_skb_data *RTW89_TX_SKB_CB(struct sk_buff *skb) 6426 6324 { 6325 + /* 6326 + * This should be used by/after rtw89_hci_tx_write() and before doing 6327 + * ieee80211_tx_info_clear_status(). 6328 + */ 6427 6329 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 6428 6330 6429 - return (struct rtw89_tx_skb_data *)info->status.status_driver_data; 6331 + return (struct rtw89_tx_skb_data *)info->driver_data; 6430 6332 } 6431 6333 6432 6334 static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr) ··· 7237 7131 } 7238 7132 7239 7133 static inline 7134 + u8 rtw89_chip_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel) 7135 + { 7136 + const struct rtw89_chip_info *chip = rtwdev->chip; 7137 + 7138 + return chip->ops->get_ch_dma(rtwdev, qsel); 7139 + } 7140 + 7141 + static inline 7240 7142 void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev, 7241 7143 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 7242 7144 { ··· 7372 7258 return dev_alloc_skb(length); 7373 7259 } 7374 7260 7375 - static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev, 7261 + static inline bool rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev, 7376 7262 struct rtw89_tx_skb_data *skb_data, 7377 7263 bool tx_done) 7378 7264 { 7379 7265 struct rtw89_tx_wait_info *wait; 7266 + bool ret = false; 7380 7267 7381 7268 rcu_read_lock(); 7382 7269 ··· 7385 7270 if (!wait) 7386 7271 goto out; 7387 7272 7273 + ret = true; 7388 7274 wait->tx_done = tx_done; 7389 - complete(&wait->completion); 7275 + /* Don't access skb anymore after completion */ 7276 + complete_all(&wait->completion); 7390 7277 7391 7278 out: 7392 7279 rcu_read_unlock(); 7280 + return ret; 7393 7281 } 7394 7282 7395 7283 static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev) ··· 7476 7358 struct sk_buff *skb, bool fwdl); 7477 7359 void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel); 7478 7360 int 
rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7479 - int qsel, unsigned int timeout); 7361 + struct rtw89_tx_wait_info *wait, int qsel, 7362 + unsigned int timeout); 7480 7363 void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, 7481 7364 struct rtw89_tx_desc_info *desc_info, 7482 7365 void *txdesc); ··· 7493 7374 void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev, 7494 7375 struct rtw89_tx_desc_info *desc_info, 7495 7376 void *txdesc); 7377 + u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel); 7378 + u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel); 7496 7379 void rtw89_core_rx(struct rtw89_dev *rtwdev, 7497 7380 struct rtw89_rx_desc_info *desc_info, 7498 7381 struct sk_buff *skb); ··· 7575 7454 int rtw89_chip_info_setup(struct rtw89_dev *rtwdev); 7576 7455 void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, 7577 7456 struct rtw89_vif_link *rtwvif_link); 7578 - bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate); 7457 + bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate); 7579 7458 int rtw89_regd_setup(struct rtw89_dev *rtwdev); 7580 7459 int rtw89_regd_init_hint(struct rtw89_dev *rtwdev); 7581 7460 const char *rtw89_regd_get_string(enum rtw89_regulation_type regd); 7582 7461 void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, 7583 7462 struct rtw89_traffic_stats *stats); 7584 - int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond); 7463 + struct rtw89_wait_response * 7464 + rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond) 7465 + __acquires(rtw89_wait); 7466 + int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait, 7467 + struct rtw89_wait_response *prep, int err) 7468 + __releases(rtw89_wait); 7585 7469 void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond, 7586 7470 const struct rtw89_completion_data *data); 7587 7471 int 
rtw89_core_start(struct rtw89_dev *rtwdev);
+124 -1
drivers/net/wireless/realtek/rtw89/debug.c
··· 86 86 struct rtw89_debugfs_priv stations; 87 87 struct rtw89_debugfs_priv disable_dm; 88 88 struct rtw89_debugfs_priv mlo_mode; 89 + struct rtw89_debugfs_priv beacon_info; 89 90 }; 90 91 91 92 struct rtw89_debugfs_iter_data { ··· 3563 3562 return 0; 3564 3563 } 3565 3564 3565 + static int rtw89_dbg_trigger_mac_error_ax(struct rtw89_dev *rtwdev) 3566 + { 3567 + u16 val16; 3568 + u8 val8; 3569 + int ret; 3570 + 3571 + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 3572 + if (ret) 3573 + return ret; 3574 + 3575 + val8 = rtw89_read8(rtwdev, R_AX_CMAC_FUNC_EN); 3576 + rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8 & ~B_AX_TMAC_EN); 3577 + mdelay(1); 3578 + rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8); 3579 + 3580 + val16 = rtw89_read16(rtwdev, R_AX_PTCL_IMR0); 3581 + rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16 | B_AX_F2PCMD_EMPTY_ERR_INT_EN); 3582 + rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16); 3583 + 3584 + return 0; 3585 + } 3586 + 3587 + static int rtw89_dbg_trigger_mac_error_be(struct rtw89_dev *rtwdev) 3588 + { 3589 + int ret; 3590 + 3591 + ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 3592 + if (ret) 3593 + return ret; 3594 + 3595 + rtw89_write32_set(rtwdev, R_BE_CMAC_FW_TRIGGER_IDCT_ISR, 3596 + B_BE_CMAC_FW_TRIG_IDCT | B_BE_CMAC_FW_ERR_IDCT_IMR); 3597 + 3598 + return 0; 3599 + } 3600 + 3601 + static int rtw89_dbg_trigger_mac_error(struct rtw89_dev *rtwdev) 3602 + { 3603 + const struct rtw89_chip_info *chip = rtwdev->chip; 3604 + 3605 + rtw89_leave_ps_mode(rtwdev); 3606 + 3607 + switch (chip->chip_gen) { 3608 + case RTW89_CHIP_AX: 3609 + return rtw89_dbg_trigger_mac_error_ax(rtwdev); 3610 + case RTW89_CHIP_BE: 3611 + return rtw89_dbg_trigger_mac_error_be(rtwdev); 3612 + default: 3613 + return -EOPNOTSUPP; 3614 + } 3615 + } 3616 + 3566 3617 static ssize_t 3567 3618 rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev, 3568 3619 struct rtw89_debugfs_priv *debugfs_priv, ··· 3630 3577 enum rtw89_dbg_crash_simulation_type { 
3631 3578 RTW89_DBG_SIM_CPU_EXCEPTION = 1, 3632 3579 RTW89_DBG_SIM_CTRL_ERROR = 2, 3580 + RTW89_DBG_SIM_MAC_ERROR = 3, 3633 3581 }; 3634 3582 3635 3583 static ssize_t ··· 3639 3585 const char *buf, size_t count) 3640 3586 { 3641 3587 int (*sim)(struct rtw89_dev *rtwdev); 3588 + bool announce = true; 3642 3589 u8 crash_type; 3643 3590 int ret; 3644 3591 ··· 3658 3603 case RTW89_DBG_SIM_CTRL_ERROR: 3659 3604 sim = rtw89_dbg_trigger_ctrl_error; 3660 3605 break; 3606 + case RTW89_DBG_SIM_MAC_ERROR: 3607 + sim = rtw89_dbg_trigger_mac_error; 3608 + 3609 + /* Driver SER flow won't get involved; only FW will. */ 3610 + announce = false; 3611 + break; 3661 3612 default: 3662 3613 return -EINVAL; 3663 3614 } 3664 3615 3665 - set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags); 3616 + if (announce) 3617 + set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags); 3618 + 3666 3619 ret = sim(rtwdev); 3667 3620 3668 3621 if (ret) ··· 4361 4298 return count; 4362 4299 } 4363 4300 4301 + static ssize_t 4302 + rtw89_debug_priv_beacon_info_get(struct rtw89_dev *rtwdev, 4303 + struct rtw89_debugfs_priv *debugfs_priv, 4304 + char *buf, size_t bufsz) 4305 + { 4306 + struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat; 4307 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 4308 + struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat; 4309 + struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist; 4310 + u16 upper, lower = bcn_stat->tbtt_tu_min; 4311 + char *p = buf, *end = buf + bufsz; 4312 + u16 *drift = bcn_stat->drift; 4313 + u8 bcn_num = bcn_stat->num; 4314 + u8 count; 4315 + u8 i; 4316 + 4317 + p += scnprintf(p, end - p, "[Beacon info]\n"); 4318 + p += scnprintf(p, end - p, "count: %u\n", pkt_stat->beacon_nr); 4319 + p += scnprintf(p, end - p, "interval: %u\n", bcn_track->beacon_int); 4320 + p += scnprintf(p, end - p, "dtim: %u\n", bcn_track->dtim); 4321 + p += scnprintf(p, end - p, "raw rssi: %lu\n", 4322 + 
ewma_rssi_read(&rtwdev->phystat.bcn_rssi)); 4323 + p += scnprintf(p, end - p, "hw rate: %u\n", pkt_stat->beacon_rate); 4324 + p += scnprintf(p, end - p, "length: %u\n", pkt_stat->beacon_len); 4325 + 4326 + p += scnprintf(p, end - p, "\n[Distribution]\n"); 4327 + p += scnprintf(p, end - p, "tbtt\n"); 4328 + for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) { 4329 + upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1; 4330 + if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1) 4331 + upper = max(upper, bcn_stat->tbtt_tu_max); 4332 + 4333 + p += scnprintf(p, end - p, "%02u - %02u: %u\n", 4334 + lower, upper, bcn_dist->bins[i]); 4335 + 4336 + lower = upper + 1; 4337 + } 4338 + 4339 + p += scnprintf(p, end - p, "\ndrift\n"); 4340 + 4341 + for (i = 0; i < bcn_num; i += count) { 4342 + count = 1; 4343 + while (i + count < bcn_num && drift[i] == drift[i + count]) 4344 + count++; 4345 + 4346 + p += scnprintf(p, end - p, "%u: %u\n", drift[i], count); 4347 + } 4348 + p += scnprintf(p, end - p, "\nlower bound: %u\n", bcn_dist->lower_bound); 4349 + p += scnprintf(p, end - p, "upper bound: %u\n", bcn_dist->upper_bound); 4350 + p += scnprintf(p, end - p, "outlier count: %u\n", bcn_dist->outlier_count); 4351 + 4352 + p += scnprintf(p, end - p, "\n[Tracking]\n"); 4353 + p += scnprintf(p, end - p, "tbtt offset: %u\n", bcn_track->tbtt_offset); 4354 + p += scnprintf(p, end - p, "bcn timeout: %u\n", bcn_track->bcn_timeout); 4355 + 4356 + return p - buf; 4357 + } 4358 + 4364 4359 #define rtw89_debug_priv_get(name, opts...) 
\ 4365 4360 { \ 4366 4361 .cb_read = rtw89_debug_priv_ ##name## _get, \ ··· 4477 4356 .stations = rtw89_debug_priv_get(stations, RLOCK), 4478 4357 .disable_dm = rtw89_debug_priv_set_and_get(disable_dm, RWLOCK), 4479 4358 .mlo_mode = rtw89_debug_priv_set_and_get(mlo_mode, RWLOCK), 4359 + .beacon_info = rtw89_debug_priv_get(beacon_info), 4480 4360 }; 4481 4361 4482 4362 #define rtw89_debugfs_add(name, mode, fopname, parent) \ ··· 4523 4401 rtw89_debugfs_add_r(stations); 4524 4402 rtw89_debugfs_add_rw(disable_dm); 4525 4403 rtw89_debugfs_add_rw(mlo_mode); 4404 + rtw89_debugfs_add_r(beacon_info); 4526 4405 } 4527 4406 4528 4407 void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+1
drivers/net/wireless/realtek/rtw89/debug.h
··· 56 56 #endif 57 57 58 58 #define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a) 59 + #define rtw89_info_once(rtwdev, a...) dev_info_once((rtwdev)->dev, ##a) 59 60 #define rtw89_warn(rtwdev, a...) dev_warn((rtwdev)->dev, ##a) 60 61 #define rtw89_err(rtwdev, a...) dev_err((rtwdev)->dev, ##a) 61 62
+165 -12
drivers/net/wireless/realtek/rtw89/fw.c
··· 830 830 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG), 831 831 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1), 832 832 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP), 833 + __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING), 833 834 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 834 835 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 835 836 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0), 836 837 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 837 838 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 839 + __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING), 838 840 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP), 839 841 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG), 840 842 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1), ··· 848 846 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 849 847 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 850 848 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1), 849 + __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG), 850 + __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1), 851 + __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING), 851 852 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0), 852 853 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 853 854 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), ··· 869 864 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1), 870 865 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG), 871 866 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1), 867 + __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING), 872 868 }; 873 869 874 870 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, ··· 1286 1280 return 0; 1287 1281 } 1288 1282 1283 + static 1284 + int 
rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev, 1285 + const struct rtw89_fw_element_hdr *elm, 1286 + const union rtw89_fw_element_arg arg) 1287 + { 1288 + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1289 + 1290 + elm_info->afe = elm; 1291 + 1292 + return 0; 1293 + } 1294 + 1289 1295 static const struct rtw89_fw_element_handler __fw_element_handlers[] = { 1290 1296 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, 1291 1297 { .fw_type = RTW89_FW_BBMCU0 }, NULL}, ··· 1382 1364 }, 1383 1365 [RTW89_FW_ELEMENT_ID_REGD] = { 1384 1366 rtw89_recognize_regd_from_elm, {}, "REGD", 1367 + }, 1368 + [RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = { 1369 + rtw89_build_afe_pwr_seq_from_elm, {}, "AFE", 1385 1370 }, 1386 1371 }; 1387 1372 ··· 1558 1537 struct rtw89_fw_hdr *fw_hdr; 1559 1538 struct sk_buff *skb; 1560 1539 u32 truncated; 1561 - u32 ret = 0; 1540 + int ret; 1562 1541 1563 1542 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1564 1543 if (!skb) { ··· 4010 3989 return ret; 4011 3990 } 4012 3991 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3992 + 3993 + int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev, 3994 + struct rtw89_vif_link *rtwvif_link, u32 offset) 3995 + { 3996 + struct rtw89_h2c_tbtt_tuning *h2c; 3997 + u32 len = sizeof(*h2c); 3998 + struct sk_buff *skb; 3999 + int ret; 4000 + 4001 + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4002 + if (!skb) { 4003 + rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n"); 4004 + return -ENOMEM; 4005 + } 4006 + skb_put(skb, len); 4007 + h2c = (struct rtw89_h2c_tbtt_tuning *)skb->data; 4008 + 4009 + h2c->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) | 4010 + le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT); 4011 + h2c->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT); 4012 + 4013 + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4014 + H2C_CAT_MAC, H2C_CL_MAC_PS, 4015 + H2C_FUNC_TBTT_TUNING, 0, 0, 4016 + len); 
4017 + 4018 + ret = rtw89_h2c_tx(rtwdev, skb, false); 4019 + if (ret) { 4020 + rtw89_err(rtwdev, "failed to send h2c\n"); 4021 + goto fail; 4022 + } 4023 + 4024 + return 0; 4025 + fail: 4026 + dev_kfree_skb_any(skb); 4027 + 4028 + return ret; 4029 + } 4030 + 4031 + int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4032 + { 4033 + #define RTW89_BCN_TO_VAL_MIN 4 4034 + #define RTW89_BCN_TO_VAL_MAX 64 4035 + #define RTW89_DTIM_TO_VAL_MIN 7 4036 + #define RTW89_DTIM_TO_VAL_MAX 15 4037 + struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 4038 + struct rtw89_h2c_pwr_lvl *h2c; 4039 + u32 len = sizeof(*h2c); 4040 + struct sk_buff *skb; 4041 + u8 bcn_to_val; 4042 + int ret; 4043 + 4044 + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4045 + if (!skb) { 4046 + rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n"); 4047 + return -ENOMEM; 4048 + } 4049 + skb_put(skb, len); 4050 + h2c = (struct rtw89_h2c_pwr_lvl *)skb->data; 4051 + 4052 + bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout, 4053 + RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX); 4054 + 4055 + h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) | 4056 + le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) | 4057 + le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) | 4058 + le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) | 4059 + le32_encode_bits(RTW89_DTIM_TO_VAL_MIN, 4060 + RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL); 4061 + 4062 + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4063 + H2C_CAT_MAC, H2C_CL_MAC_PS, 4064 + H2C_FUNC_PS_POWER_LEVEL, 0, 0, 4065 + len); 4066 + 4067 + ret = rtw89_h2c_tx(rtwdev, skb, false); 4068 + if (ret) { 4069 + rtw89_err(rtwdev, "failed to send h2c\n"); 4070 + goto fail; 4071 + } 4072 + 4073 + return 0; 4074 + fail: 4075 + dev_kfree_skb_any(skb); 4076 + 4077 + return ret; 4078 + } 4013 4079 4014 4080 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 4015 4081 struct rtw89_vif_link *rtwvif_link, ··· 
6688 6580 return ret; 6689 6581 } 6690 6582 6583 + int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable) 6584 + { 6585 + struct rtw89_h2c_rf_tas *h2c; 6586 + u32 len = sizeof(*h2c); 6587 + struct sk_buff *skb; 6588 + int ret; 6589 + 6590 + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6591 + if (!skb) { 6592 + rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n"); 6593 + return -ENOMEM; 6594 + } 6595 + skb_put(skb, len); 6596 + h2c = (struct rtw89_h2c_rf_tas *)skb->data; 6597 + 6598 + h2c->enable = cpu_to_le32(enable); 6599 + 6600 + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6601 + H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6602 + H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, len); 6603 + 6604 + ret = rtw89_h2c_tx(rtwdev, skb, false); 6605 + if (ret) { 6606 + rtw89_err(rtwdev, "failed to send h2c\n"); 6607 + goto fail; 6608 + } 6609 + 6610 + return 0; 6611 + fail: 6612 + dev_kfree_skb_any(skb); 6613 + 6614 + return ret; 6615 + } 6616 + 6691 6617 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 6692 6618 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 6693 6619 bool rack, bool dack) ··· 6968 6826 const struct rtw89_chip_info *chip = rtwdev->chip; 6969 6827 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6970 6828 const u32 *c2h_reg = chip->c2h_regs; 6971 - u32 ret, timeout; 6829 + u32 timeout; 6972 6830 u8 i, val; 6831 + int ret; 6973 6832 6974 6833 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6975 6834 ··· 7008 6865 struct rtw89_mac_h2c_info *h2c_info, 7009 6866 struct rtw89_mac_c2h_info *c2h_info) 7010 6867 { 7011 - u32 ret; 6868 + int ret; 7012 6869 7013 6870 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 7014 6871 lockdep_assert_wiphy(rtwdev->hw->wiphy); ··· 7266 7123 struct rtw89_pktofld_info *info; 7267 7124 u8 probe_count = 0; 7268 7125 7269 - ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7270 7126 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7271 7127 ch_info->bw = RTW89_SCAN_WIDTH; 7272 7128 ch_info->tx_pkt = 
true; ··· 7406 7264 struct rtw89_pktofld_info *info; 7407 7265 u8 probe_count = 0, i; 7408 7266 7409 - ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7410 7267 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7411 7268 ch_info->bw = RTW89_SCAN_WIDTH; 7412 7269 ch_info->tx_null = false; ··· 8726 8585 goto fail; 8727 8586 } 8728 8587 8729 - /* not support TKIP yet */ 8730 8588 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 8731 - le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 8589 + le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0, 8590 + sizeof(gtk_info->txmickey)), 8591 + RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 8732 8592 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 8733 8593 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 8734 8594 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | ··· 8821 8679 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 8822 8680 struct rtw89_wait_info *wait, unsigned int cond) 8823 8681 { 8824 - int ret; 8682 + struct rtw89_wait_response *prep; 8683 + int ret = 0; 8684 + 8685 + lockdep_assert_wiphy(rtwdev->hw->wiphy); 8686 + 8687 + prep = rtw89_wait_for_cond_prep(wait, cond); 8688 + if (IS_ERR(prep)) 8689 + goto out; 8825 8690 8826 8691 ret = rtw89_h2c_tx(rtwdev, skb, false); 8827 8692 if (ret) { 8828 8693 rtw89_err(rtwdev, "failed to send h2c\n"); 8829 8694 dev_kfree_skb_any(skb); 8830 - return -EBUSY; 8695 + ret = -EBUSY; 8696 + goto out; 8831 8697 } 8832 8698 8833 - if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8834 - return 1; 8699 + if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) { 8700 + ret = 1; 8701 + goto out; 8702 + } 8835 8703 8836 - return rtw89_wait_for_cond(wait, cond); 8704 + out: 8705 + return rtw89_wait_for_cond_eval(wait, prep, ret); 8837 8706 } 8838 8707 8839 8708 #define H2C_ADD_MCC_LEN 16
+75 -2
drivers/net/wireless/realtek/rtw89/fw.h
··· 1602 1602 #define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16) 1603 1603 #define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31) 1604 1604 1605 + struct rtw89_h2c_tbtt_tuning { 1606 + __le32 w0; 1607 + __le32 w1; 1608 + } __packed; 1609 + 1610 + #define RTW89_H2C_TBTT_TUNING_W0_BAND GENMASK(3, 0) 1611 + #define RTW89_H2C_TBTT_TUNING_W0_PORT GENMASK(7, 4) 1612 + #define RTW89_H2C_TBTT_TUNING_W1_SHIFT GENMASK(31, 0) 1613 + 1614 + struct rtw89_h2c_pwr_lvl { 1615 + __le32 w0; 1616 + __le32 w1; 1617 + } __packed; 1618 + 1619 + #define RTW89_H2C_PWR_LVL_W0_MACID GENMASK(7, 0) 1620 + #define RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL GENMASK(15, 8) 1621 + #define RTW89_H2C_PWR_LVL_W0_PS_LVL GENMASK(19, 16) 1622 + #define RTW89_H2C_PWR_LVL_W0_TRX_LVL GENMASK(23, 20) 1623 + #define RTW89_H2C_PWR_LVL_W0_BCN_TO_LVL GENMASK(27, 24) 1624 + #define RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL GENMASK(31, 28) 1625 + #define RTW89_H2C_PWR_LVL_W1_MACID_EXT GENMASK(7, 0) 1626 + 1605 1627 struct rtw89_h2c_role_maintain { 1606 1628 __le32 w0; 1607 1629 }; ··· 3984 3962 RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ = 24, 3985 3963 RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ = 25, 3986 3964 RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26, 3965 + RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ = 27, 3987 3966 3988 3967 RTW89_FW_ELEMENT_ID_NUM, 3989 3968 }; ··· 4090 4067 BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_N) | \ 4091 4068 BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_P)) 4092 4069 4070 + enum rtw89_fw_afe_action { 4071 + RTW89_FW_AFE_ACTION_WRITE = 0, 4072 + RTW89_FW_AFE_ACTION_DELAY = 1, 4073 + RTW89_FW_AFE_ACTION_POLL = 2, 4074 + }; 4075 + 4076 + enum rtw89_fw_afe_cat { 4077 + RTW89_FW_AFE_CAT_BB = 0, 4078 + RTW89_FW_AFE_CAT_BB1 = 1, 4079 + RTW89_FW_AFE_CAT_MAC = 2, 4080 + RTW89_FW_AFE_CAT_MAC1 = 3, 4081 + RTW89_FW_AFE_CAT_AFEDIG = 4, 4082 + RTW89_FW_AFE_CAT_AFEDIG1 = 5, 4083 + }; 4084 + 4085 + enum rtw89_fw_afe_class { 4086 + RTW89_FW_AFE_CLASS_P0 = 0, 4087 + RTW89_FW_AFE_CLASS_P1 = 1, 4088 + RTW89_FW_AFE_CLASS_P2 = 2, 4089 + 
RTW89_FW_AFE_CLASS_P3 = 3, 4090 + RTW89_FW_AFE_CLASS_P4 = 4, 4091 + RTW89_FW_AFE_CLASS_CMN = 5, 4092 + }; 4093 + 4093 4094 struct rtw89_fw_element_hdr { 4094 4095 __le32 id; /* enum rtw89_fw_element_id */ 4095 4096 __le32 size; /* exclude header size */ ··· 4151 4104 u8 rsvd1[3]; 4152 4105 __le16 offset[]; 4153 4106 } __packed rfk_log_fmt; 4107 + struct { 4108 + u8 rsvd[8]; 4109 + struct rtw89_phy_afe_info { 4110 + __le32 action; /* enum rtw89_fw_afe_action */ 4111 + __le32 cat; /* enum rtw89_fw_afe_cat */ 4112 + __le32 class; /* enum rtw89_fw_afe_class */ 4113 + __le32 addr; 4114 + __le32 mask; 4115 + __le32 val; 4116 + } __packed infos[]; 4117 + } __packed afe; 4154 4118 struct __rtw89_fw_txpwr_element txpwr; 4155 4119 struct __rtw89_fw_regd_element regd; 4156 4120 } __packed u; ··· 4259 4201 H2C_FUNC_MAC_LPS_PARM = 0x0, 4260 4202 H2C_FUNC_P2P_ACT = 0x1, 4261 4203 H2C_FUNC_IPS_CFG = 0x3, 4204 + H2C_FUNC_PS_POWER_LEVEL = 0x7, 4205 + H2C_FUNC_TBTT_TUNING = 0xA, 4262 4206 4263 4207 NUM_OF_RTW89_PS_H2C_FUNC, 4264 4208 }; ··· 4430 4370 H2C_FUNC_RFK_DACK_OFFLOAD = 0x5, 4431 4371 H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6, 4432 4372 H2C_FUNC_RFK_PRE_NOTIFY = 0x8, 4373 + H2C_FUNC_RFK_TAS_OFFLOAD = 0x9, 4433 4374 }; 4434 4375 4435 4376 struct rtw89_fw_h2c_rf_get_mccch { ··· 4612 4551 u8 rxdck_dbg_en; 4613 4552 } __packed; 4614 4553 4554 + struct rtw89_h2c_rf_tas { 4555 + __le32 enable; 4556 + } __packed; 4557 + 4615 4558 struct rtw89_h2c_rf_rxdck { 4616 4559 struct rtw89_h2c_rf_rxdck_v0 v0; 4617 4560 u8 is_chl_k; ··· 4748 4683 u8 version; 4749 4684 } __packed; 4750 4685 4751 - struct rtw89_c2h_rf_tas_info { 4752 - struct rtw89_c2h_hdr hdr; 4686 + struct rtw89_c2h_rf_tas_rpt_log { 4753 4687 __le32 cur_idx; 4754 4688 __le16 txpwr_history[20]; 4689 + } __packed; 4690 + 4691 + struct rtw89_c2h_rf_tas_info { 4692 + struct rtw89_c2h_hdr hdr; 4693 + struct rtw89_c2h_rf_tas_rpt_log content; 4755 4694 } __packed; 4756 4695 4757 4696 #define RTW89_FW_RSVD_PLE_SIZE 0x800 ··· 4819 4750 struct 
rtw89_vif_link *rtwvif_link); 4820 4751 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 4821 4752 struct rtw89_vif_link *rtwvif_link); 4753 + int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev, 4754 + struct rtw89_vif_link *rtwvif_link, u32 offset); 4755 + int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); 4822 4756 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif, 4823 4757 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr); 4824 4758 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, ··· 4898 4826 const struct rtw89_chan *chan); 4899 4827 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 4900 4828 const struct rtw89_chan *chan, bool is_chl_k); 4829 + int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable); 4901 4830 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 4902 4831 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 4903 4832 bool rack, bool dack);
+45 -27
drivers/net/wireless/realtek/rtw89/mac.c
··· 9 9 #include "fw.h" 10 10 #include "mac.h" 11 11 #include "pci.h" 12 + #include "phy.h" 12 13 #include "ps.h" 13 14 #include "reg.h" 14 15 #include "util.h" ··· 178 177 struct rtw89_mac_dle_dfi_qempty *qempty) 179 178 { 180 179 struct rtw89_mac_dle_dfi_ctrl ctrl; 181 - u32 ret; 180 + int ret; 182 181 183 182 ctrl.type = qempty->dle_type; 184 183 ctrl.target = DLE_DFI_TYPE_QEMPTY; ··· 986 985 struct rtw89_hfc_ch_info *info = param->ch_info; 987 986 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; 988 987 u32 val; 989 - u32 ret; 988 + int ret; 990 989 991 990 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 992 991 if (ret) ··· 1177 1176 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1178 1177 const struct rtw89_chip_info *chip = rtwdev->chip; 1179 1178 u32 dma_ch_mask = chip->dma_ch_mask; 1179 + int ret = 0; 1180 1180 u8 ch; 1181 - u32 ret = 0; 1182 1181 1183 1182 if (reset) 1184 1183 ret = hfc_reset_param(rtwdev); ··· 1194 1193 if (!en && h2c_en) { 1195 1194 mac->hfc_h2c_cfg(rtwdev); 1196 1195 mac->hfc_func_en(rtwdev, en, h2c_en); 1197 - return ret; 1196 + return 0; 1198 1197 } 1199 1198 1200 1199 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { ··· 2414 2413 2415 2414 static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2416 2415 { 2417 - u32 ret; 2416 + int ret; 2418 2417 u32 reg; 2419 2418 u32 val; 2420 2419 ··· 2955 2954 struct rtw89_mac_h2c_info h2c_info = {}; 2956 2955 enum rtw89_mac_c2h_type c2h_type; 2957 2956 u8 content_len; 2958 - u32 ret; 2957 + int ret; 2959 2958 2960 2959 if (chip->chip_gen == RTW89_CHIP_AX) 2961 2960 content_len = 0; ··· 3106 3105 static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, 3107 3106 u16 tx_en_u16, u16 mask_u16) 3108 3107 { 3109 - u32 ret; 3110 3108 struct rtw89_mac_c2h_info c2h_info = {0}; 3111 3109 struct rtw89_mac_h2c_info h2c_info = {0}; 3112 3110 struct rtw89_h2creg_sch_tx_en *sch_tx_en = &h2c_info.u.sch_tx_en; 3111 + int ret; 3113 3112 3114 3113 
h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN; 3115 3114 h2c_info.content_len = sizeof(*sch_tx_en) - RTW89_H2CREG_HDR_LEN; ··· 4198 4197 .ptcl_dbg = R_AX_PTCL_DBG, 4199 4198 .ptcl_dbg_info = R_AX_PTCL_DBG_INFO, 4200 4199 .bcn_drop_all = R_AX_BCN_DROP_ALL0, 4200 + .bcn_psr_rpt = R_AX_BCN_PSR_RPT_P0, 4201 4201 .hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, 4202 4202 R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, 4203 4203 R_AX_PORT_HGQ_WINDOW_CFG + 3}, ··· 4651 4649 BCN_ERLY_DEF); 4652 4650 } 4653 4651 4654 - static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev, 4655 - struct rtw89_vif_link *rtwvif_link) 4652 + static void rtw89_mac_port_cfg_bcn_psr_rpt(struct rtw89_dev *rtwdev, 4653 + struct rtw89_vif_link *rtwvif_link) 4656 4654 { 4657 4655 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4658 4656 const struct rtw89_port_reg *p = mac->port_base; 4659 - u16 val; 4657 + struct ieee80211_bss_conf *bss_conf; 4658 + u8 bssid_index; 4659 + u32 reg; 4660 4660 4661 - if (rtwdev->chip->chip_id != RTL8852C) 4662 - return; 4661 + rcu_read_lock(); 4663 4662 4664 - if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT && 4665 - rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION) 4666 - return; 4663 + bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4664 + if (bss_conf->nontransmitted) 4665 + bssid_index = bss_conf->bssid_index; 4666 + else 4667 + bssid_index = 0; 4667 4668 4668 - val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) | 4669 - B_AX_TBTT_SHIFT_OFST_SIGN; 4669 + rcu_read_unlock(); 4670 4670 4671 - rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift, 4672 - B_AX_TBTT_SHIFT_OFST_MASK, val); 4671 + reg = rtw89_mac_reg_by_idx(rtwdev, p->bcn_psr_rpt + rtwvif_link->port * 4, 4672 + rtwvif_link->mac_idx); 4673 + rtw89_write32_mask(rtwdev, reg, B_AX_BCAID_P0_MASK, bssid_index); 4673 4674 } 4674 4675 4675 4676 void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev, ··· 4825 4820 
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif_link); 4826 4821 rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif_link); 4827 4822 rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif_link); 4828 - rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif_link); 4829 4823 rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif_link); 4830 4824 rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif_link); 4831 4825 rtw89_mac_port_cfg_func_en(rtwdev, rtwvif_link, true); 4832 4826 rtw89_mac_port_tsf_resync_all(rtwdev); 4833 4827 fsleep(BCN_ERLY_SET_DLY); 4834 4828 rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif_link); 4829 + rtw89_mac_port_cfg_bcn_psr_rpt(rtwdev, rtwvif_link); 4835 4830 4836 4831 return 0; 4837 4832 } ··· 5046 5041 if (op_chan) { 5047 5042 rtw89_mac_enable_aps_bcn_by_chan(rtwdev, op_chan, false); 5048 5043 ieee80211_stop_queues(rtwdev->hw); 5044 + } else { 5045 + rtw89_phy_nhm_get_result(rtwdev, band, chan); 5049 5046 } 5050 5047 return; 5051 5048 case RTW89_SCAN_END_SCAN_NOTIFY: ··· 5078 5071 RTW89_CHANNEL_WIDTH_20); 5079 5072 rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx, 5080 5073 &new); 5074 + rtw89_phy_nhm_trigger(rtwdev); 5081 5075 } 5082 5076 break; 5083 5077 default: ··· 5244 5236 } 5245 5237 5246 5238 static void 5239 + rtw89_mac_c2h_bcn_upd_done(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5240 + { 5241 + } 5242 + 5243 + static void 5247 5244 rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, 5248 5245 u32 len) 5249 5246 { ··· 5268 5255 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(pkt_id, pkt_op); 5269 5256 5270 5257 rtw89_complete_cond(wait, cond, &data); 5258 + } 5259 + 5260 + static void 5261 + rtw89_mac_c2h_bcn_resend(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5262 + { 5271 5263 } 5272 5264 5273 5265 static void ··· 5664 5646 [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, 5665 5647 [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, 5666 5648 [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp, 5667 - [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, 
5649 + [RTW89_MAC_C2H_FUNC_BCN_RESEND] = rtw89_mac_c2h_bcn_resend, 5668 5650 [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, 5669 5651 [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp, 5670 5652 [RTW89_MAC_C2H_FUNC_TX_DUTY_RPT] = rtw89_mac_c2h_tx_duty_rpt, ··· 5679 5661 [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack, 5680 5662 [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log, 5681 5663 [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt, 5664 + [RTW89_MAC_C2H_FUNC_BCN_UPD_DONE] = rtw89_mac_c2h_bcn_upd_done, 5682 5665 }; 5683 5666 5684 5667 static ··· 5832 5813 case RTW89_MAC_C2H_CLASS_ROLE: 5833 5814 return; 5834 5815 default: 5835 - rtw89_info(rtwdev, "MAC c2h class %d not support\n", class); 5836 - return; 5816 + break; 5837 5817 } 5838 5818 if (!handler) { 5839 - rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class, 5840 - func); 5819 + rtw89_info_once(rtwdev, "MAC c2h class %d func %d not support\n", 5820 + class, func); 5841 5821 return; 5842 5822 } 5843 5823 handler(rtwdev, skb, len); ··· 6738 6720 u8 mac_idx = rtwvif_link->mac_idx; 6739 6721 u16 set = mac->muedca_ctrl.mask; 6740 6722 u32 reg; 6741 - u32 ret; 6723 + int ret; 6742 6724 6743 6725 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6744 6726 if (ret) ··· 6880 6862 { 6881 6863 struct rtw89_mac_h2c_info h2c_info = {}; 6882 6864 struct rtw89_mac_c2h_info c2h_info = {}; 6883 - u32 ret; 6865 + int ret; 6884 6866 6885 6867 if (RTW89_CHK_FW_FEATURE(NO_WOW_CPU_IO_RX, &rtwdev->fw)) 6886 6868 return 0;
+1
drivers/net/wireless/realtek/rtw89/mac.h
··· 419 419 RTW89_MAC_C2H_FUNC_DONE_ACK, 420 420 RTW89_MAC_C2H_FUNC_C2H_LOG, 421 421 RTW89_MAC_C2H_FUNC_BCN_CNT, 422 + RTW89_MAC_C2H_FUNC_BCN_UPD_DONE = 0x06, 422 423 RTW89_MAC_C2H_FUNC_INFO_MAX, 423 424 }; 424 425
+35
drivers/net/wireless/realtek/rtw89/mac80211.c
··· 1837 1837 } 1838 1838 #endif 1839 1839 1840 + static int rtw89_ops_get_survey(struct ieee80211_hw *hw, int idx, 1841 + struct survey_info *survey) 1842 + { 1843 + struct ieee80211_conf *conf = &hw->conf; 1844 + struct rtw89_dev *rtwdev = hw->priv; 1845 + struct rtw89_bb_ctx *bb; 1846 + 1847 + if (idx == 0) { 1848 + survey->channel = conf->chandef.chan; 1849 + survey->filled = SURVEY_INFO_NOISE_DBM; 1850 + survey->noise = RTW89_NOISE_DEFAULT; 1851 + 1852 + return 0; 1853 + } 1854 + 1855 + rtw89_for_each_active_bb(rtwdev, bb) { 1856 + struct rtw89_env_monitor_info *env = &bb->env_monitor; 1857 + struct rtw89_nhm_report *rpt; 1858 + 1859 + rpt = list_first_entry_or_null(&env->nhm_rpt_list, typeof(*rpt), list); 1860 + if (!rpt) 1861 + continue; 1862 + 1863 + survey->filled = SURVEY_INFO_NOISE_DBM; 1864 + survey->noise = rpt->noise - MAX_RSSI; 1865 + survey->channel = rpt->channel; 1866 + list_del_init(&rpt->list); 1867 + 1868 + return 0; 1869 + } 1870 + 1871 + return -EINVAL; 1872 + } 1873 + 1840 1874 static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw) 1841 1875 { 1842 1876 struct rtw89_dev *rtwdev = hw->priv; ··· 1903 1869 .sta_state = rtw89_ops_sta_state, 1904 1870 .set_key = rtw89_ops_set_key, 1905 1871 .ampdu_action = rtw89_ops_ampdu_action, 1872 + .get_survey = rtw89_ops_get_survey, 1906 1873 .set_rts_threshold = rtw89_ops_set_rts_threshold, 1907 1874 .sta_statistics = rtw89_ops_sta_statistics, 1908 1875 .flush = rtw89_ops_flush,
+1
drivers/net/wireless/realtek/rtw89/mac_be.c
··· 56 56 .ptcl_dbg = R_BE_PTCL_DBG, 57 57 .ptcl_dbg_info = R_BE_PTCL_DBG_INFO, 58 58 .bcn_drop_all = R_BE_BCN_DROP_ALL0, 59 + .bcn_psr_rpt = R_BE_BCN_PSR_RPT_P0, 59 60 .hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG, 60 61 R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2, 61 62 R_BE_PORT_HGQ_WINDOW_CFG + 3},
+353 -113
drivers/net/wireless/realtek/rtw89/pci.c
··· 134 134 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 135 135 struct rtw89_pci *rtwpci) 136 136 { 137 - struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 137 + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12]; 138 138 u32 cnt; 139 139 140 140 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); ··· 440 440 int countdown = rtwdev->napi_budget_countdown; 441 441 u32 cnt; 442 442 443 - rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 443 + rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ]; 444 444 445 445 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 446 446 if (!cnt) ··· 464 464 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 465 465 struct ieee80211_tx_info *info; 466 466 467 - rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE); 467 + if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE)) 468 + return; 468 469 469 470 info = IEEE80211_SKB_CB(skb); 470 471 ieee80211_tx_info_clear_status(info); ··· 569 568 rtw89_pci_enqueue_txwd(tx_ring, txwd); 570 569 } 571 570 572 - static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 573 - struct rtw89_pci_rpp_fmt *rpp) 571 + void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp, 572 + struct rtw89_pci_rpp_info *rpp_info) 573 + { 574 + const struct rtw89_pci_rpp_fmt *rpp = _rpp; 575 + 576 + rpp_info->seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 577 + rpp_info->qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 578 + rpp_info->tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 579 + rpp_info->txch = rtw89_chip_get_ch_dma(rtwdev, rpp_info->qsel); 580 + } 581 + EXPORT_SYMBOL(rtw89_pci_parse_rpp); 582 + 583 + void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp, 584 + struct rtw89_pci_rpp_info *rpp_info) 585 + { 586 + const struct rtw89_pci_rpp_fmt_v1 *rpp = _rpp; 587 + 588 + rpp_info->seq = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK); 589 + rpp_info->qsel = 
le32_get_bits(rpp->w1, RTW89_PCI_RPP_W1_QSEL_V1_MASK); 590 + rpp_info->tx_status = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK); 591 + rpp_info->txch = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_DMA_CH_MASK); 592 + } 593 + EXPORT_SYMBOL(rtw89_pci_parse_rpp_v1); 594 + 595 + static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp) 574 596 { 575 597 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 576 - struct rtw89_pci_tx_ring *tx_ring; 598 + const struct rtw89_pci_info *info = rtwdev->pci_info; 599 + struct rtw89_pci_rpp_info rpp_info = {}; 577 600 struct rtw89_pci_tx_wd_ring *wd_ring; 601 + struct rtw89_pci_tx_ring *tx_ring; 578 602 struct rtw89_pci_tx_wd *txwd; 579 - u16 seq; 580 - u8 qsel, tx_status, txch; 581 603 582 - seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 583 - qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 584 - tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 585 - txch = rtw89_core_get_ch_dma(rtwdev, qsel); 604 + info->parse_rpp(rtwdev, rpp, &rpp_info); 586 605 587 - if (txch == RTW89_TXCH_CH12) { 606 + if (rpp_info.txch == RTW89_TXCH_CH12) { 588 607 rtw89_warn(rtwdev, "should no fwcmd release report\n"); 589 608 return; 590 609 } 591 610 592 - tx_ring = &rtwpci->tx_rings[txch]; 611 + tx_ring = &rtwpci->tx.rings[rpp_info.txch]; 593 612 wd_ring = &tx_ring->wd_ring; 594 - txwd = &wd_ring->pages[seq]; 613 + txwd = &wd_ring->pages[rpp_info.seq]; 595 614 596 - rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); 615 + rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, rpp_info.seq, 616 + rpp_info.tx_status); 597 617 } 598 618 599 619 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, ··· 639 617 u32 max_cnt) 640 618 { 641 619 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 642 - struct rtw89_pci_rx_info *rx_info; 643 - struct rtw89_pci_rpp_fmt *rpp; 620 + const struct rtw89_pci_info *info = rtwdev->pci_info; 644 621 struct rtw89_rx_desc_info 
desc_info = {}; 622 + struct rtw89_pci_rx_info *rx_info; 645 623 struct sk_buff *skb; 646 - u32 cnt = 0; 647 - u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); 624 + void *rpp; 648 625 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 626 + u32 rpp_size = info->rpp_fmt_size; 627 + u32 cnt = 0; 649 628 u32 skb_idx; 650 629 u32 offset; 651 630 int ret; ··· 672 649 /* first segment has RX desc */ 673 650 offset = desc_info.offset + desc_info.rxd_len; 674 651 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 675 - rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 652 + rpp = skb->data + offset; 676 653 rtw89_pci_release_rpp(rtwdev, rpp); 677 654 } 678 655 ··· 717 694 u32 cnt; 718 695 int work_done; 719 696 720 - rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 697 + rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ]; 721 698 722 699 spin_lock_bh(&rtwpci->trx_lock); 723 700 ··· 747 724 int i; 748 725 749 726 for (i = 0; i < RTW89_RXCH_NUM; i++) { 750 - rx_ring = &rtwpci->rx_rings[i]; 727 + rx_ring = &rtwpci->rx.rings[i]; 751 728 bd_ring = &rx_ring->bd_ring; 752 729 753 730 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); ··· 820 797 } 821 798 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2); 822 799 800 + void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev, 801 + struct rtw89_pci *rtwpci, 802 + struct rtw89_pci_isrs *isrs) 803 + { 804 + isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs; 805 + isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ? 806 + rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0; 807 + isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1]; 808 + 809 + /* isrs[0] is not used, so borrow to store RDU status to share common 810 + * flow in rtw89_pci_interrupt_threadfn(). 
811 + */ 812 + isrs->isrs[0] = isrs->isrs[1] & (B_BE_PCIE_RDU_CH1_INT | 813 + B_BE_PCIE_RDU_CH0_INT); 814 + 815 + if (isrs->halt_c2h_isrs) 816 + rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs); 817 + if (isrs->isrs[1]) 818 + rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]); 819 + rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs); 820 + } 821 + EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v3); 822 + 823 823 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 824 824 { 825 825 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); ··· 890 844 } 891 845 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2); 892 846 847 + void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 848 + { 849 + rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs); 850 + rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]); 851 + rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs); 852 + } 853 + EXPORT_SYMBOL(rtw89_pci_enable_intr_v3); 854 + 855 + void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 856 + { 857 + rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0); 858 + rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0); 859 + } 860 + EXPORT_SYMBOL(rtw89_pci_disable_intr_v3); 861 + 893 862 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) 894 863 { 895 864 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; ··· 946 885 struct rtw89_dev *rtwdev = dev; 947 886 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 948 887 const struct rtw89_pci_info *info = rtwdev->pci_info; 949 - const struct rtw89_pci_gen_def *gen_def = info->gen_def; 888 + const struct rtw89_pci_isr_def *isr_def = info->isr_def; 950 889 struct rtw89_pci_isrs isrs; 951 890 unsigned long flags; 952 891 ··· 954 893 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); 955 894 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 956 895 957 - if (unlikely(isrs.isrs[0] & gen_def->isr_rdu)) 896 + if 
(unlikely(isrs.isrs[0] & isr_def->isr_rdu)) 958 897 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); 959 898 960 - if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h)) 899 + if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_halt_c2h)) 961 900 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); 962 901 963 - if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout)) 902 + if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout)) 964 903 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT); 965 904 966 905 if (unlikely(rtwpci->under_recovery)) ··· 1011 950 return irqret; 1012 951 } 1013 952 953 + #define DEF_TXCHADDRS_TYPE3(gen, ch_idx, txch, v...) \ 954 + [RTW89_TXCH_##ch_idx] = { \ 955 + .num = R_##gen##_##txch##_TXBD_CFG, \ 956 + .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 957 + .bdram = 0, \ 958 + .desa_l = 0, \ 959 + .desa_h = 0, \ 960 + } 961 + 962 + #define DEF_TXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, txch, grp, v...) \ 963 + [RTW89_TXCH_##ch_idx] = { \ 964 + .num = R_##gen##_##txch##_TXBD_CFG, \ 965 + .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 966 + .bdram = 0, \ 967 + .desa_l = R_##gen##_##grp##_TXBD_DESA_L, \ 968 + .desa_h = R_##gen##_##grp##_TXBD_DESA_H, \ 969 + } 970 + 1014 971 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \ 1015 972 [RTW89_TXCH_##ch_idx] = { \ 1016 973 .num = R_##gen##_##txch##_TXBD_NUM ##v, \ ··· 1054 975 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 1055 976 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 1056 977 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 978 + } 979 + 980 + #define DEF_RXCHADDRS_TYPE3(gen, ch_idx, rxch, v...) \ 981 + [RTW89_RXCH_##ch_idx] = { \ 982 + .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \ 983 + .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \ 984 + .desa_l = 0, \ 985 + .desa_h = 0, \ 986 + } 987 + 988 + #define DEF_RXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, rxch, grp, v...) 
\ 989 + [RTW89_RXCH_##ch_idx] = { \ 990 + .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \ 991 + .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \ 992 + .desa_l = R_##gen##_##grp##_RXBD_DESA_L, \ 993 + .desa_h = R_##gen##_##grp##_RXBD_DESA_H, \ 1057 994 } 1058 995 1059 996 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \ ··· 1149 1054 }; 1150 1055 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be); 1151 1056 1057 + const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1 = { 1058 + .tx = { 1059 + DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1), 1060 + /* no CH1 */ 1061 + DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1), 1062 + /* no CH3 */ 1063 + DEF_TXCHADDRS_TYPE3(BE, ACH4, CH4, _V1), 1064 + /* no CH5 */ 1065 + DEF_TXCHADDRS_TYPE3(BE, ACH6, CH6, _V1), 1066 + /* no CH7 */ 1067 + DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, CH8, CH8, NACQ, _V1), 1068 + /* no CH9 */ 1069 + DEF_TXCHADDRS_TYPE3(BE, CH10, CH10, _V1), 1070 + /* no CH11 */ 1071 + DEF_TXCHADDRS_TYPE3(BE, CH12, CH12, _V1), 1072 + }, 1073 + .rx = { 1074 + DEF_RXCHADDRS_TYPE3_GRP_BASE(BE, RXQ, CH0, HOST0, _V1), 1075 + DEF_RXCHADDRS_TYPE3(BE, RPQ, CH1, _V1), 1076 + }, 1077 + }; 1078 + EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be_v1); 1079 + 1080 + #undef DEF_TXCHADDRS_TYPE3 1081 + #undef DEF_TXCHADDRS_TYPE3_GRP_BASE 1082 + #undef DEF_TXCHADDRS_TYPE2 1152 1083 #undef DEF_TXCHADDRS_TYPE1 1153 1084 #undef DEF_TXCHADDRS 1085 + #undef DEF_RXCHADDRS_TYPE3 1086 + #undef DEF_RXCHADDRS_TYPE3_GRP_BASE 1154 1087 #undef DEF_RXCHADDRS 1155 1088 1156 1089 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, ··· 1224 1101 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 1225 1102 { 1226 1103 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1227 - struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 1104 + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12]; 1228 1105 u32 cnt; 1229 1106 1230 1107 spin_lock_bh(&rtwpci->trx_lock); ··· 1240 1117 u8 txch) 1241 
1118 { 1242 1119 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1243 - struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1120 + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1244 1121 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1245 1122 u32 cnt; 1246 1123 ··· 1257 1134 u8 txch) 1258 1135 { 1259 1136 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1260 - struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1137 + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1261 1138 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1262 1139 const struct rtw89_chip_info *chip = rtwdev->chip; 1263 1140 u32 bd_cnt, wd_cnt, min_cnt = 0; ··· 1265 1142 enum rtw89_debug_mask debug_mask; 1266 1143 u32 cnt; 1267 1144 1268 - rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 1145 + rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ]; 1269 1146 1270 1147 spin_lock_bh(&rtwpci->trx_lock); 1271 1148 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); ··· 1350 1227 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1351 1228 { 1352 1229 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1353 - struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1230 + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1354 1231 1355 1232 if (rtwdev->hci.paused) { 1356 1233 set_bit(txch, rtwpci->kick_map); ··· 1370 1247 if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1371 1248 continue; 1372 1249 1373 - tx_ring = &rtwpci->tx_rings[txch]; 1250 + tx_ring = &rtwpci->tx.rings[txch]; 1374 1251 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1375 1252 } 1376 1253 } ··· 1378 1255 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1379 1256 { 1380 1257 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1381 - struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1258 + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1382 1259 struct 
rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1383 1260 u32 cur_idx, cur_rp; 1384 1261 u8 i; ··· 1494 1371 struct pci_dev *pdev = rtwpci->pdev; 1495 1372 struct sk_buff *skb = tx_req->skb; 1496 1373 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1497 - struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 1498 1374 bool en_wd_info = desc_info->en_wd_info; 1499 1375 u32 txwd_len; 1500 1376 u32 txwp_len; ··· 1509 1387 } 1510 1388 1511 1389 tx_data->dma = dma; 1512 - rcu_assign_pointer(skb_data->wait, NULL); 1513 1390 1514 1391 txwp_len = sizeof(*txwp_info); 1515 1392 txwd_len = chip->txwd_body_size; ··· 1642 1521 return -EINVAL; 1643 1522 } 1644 1523 1645 - tx_ring = &rtwpci->tx_rings[txch]; 1524 + tx_ring = &rtwpci->tx.rings[txch]; 1646 1525 spin_lock_bh(&rtwpci->trx_lock); 1647 1526 1648 1527 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); ··· 1728 1607 } 1729 1608 } 1730 1609 1610 + static u16 rtw89_pci_enc_bd_cfg(struct rtw89_dev *rtwdev, u16 bd_num, 1611 + u32 dma_offset) 1612 + { 1613 + u16 dma_offset_sel; 1614 + u16 num_sel; 1615 + 1616 + /* B_BE_TX_NUM_SEL_MASK, B_BE_RX_NUM_SEL_MASK: 1617 + * 0 -> 0 1618 + * 1 -> 64 = 2^6 1619 + * 2 -> 128 = 2^7 1620 + * ... 1621 + * 7 -> 4096 = 2^12 1622 + */ 1623 + num_sel = ilog2(bd_num) - 5; 1624 + 1625 + if (hweight16(bd_num) != 1) 1626 + rtw89_warn(rtwdev, "bd_num %u is not power of 2\n", bd_num); 1627 + 1628 + /* B_BE_TX_START_OFFSET_MASK, B_BE_RX_START_OFFSET_MASK: 1629 + * 0 -> 0 = 0 * 2^9 1630 + * 1 -> 512 = 1 * 2^9 1631 + * 2 -> 1024 = 2 * 2^9 1632 + * 3 -> 1536 = 3 * 2^9 1633 + * ... 
1634 + * 255 -> 130560 = 255 * 2^9 1635 + */ 1636 + dma_offset_sel = dma_offset >> 9; 1637 + 1638 + if (dma_offset % 512) 1639 + rtw89_warn(rtwdev, "offset %u is not multiple of 512\n", dma_offset); 1640 + 1641 + return u16_encode_bits(num_sel, B_BE_TX_NUM_SEL_MASK) | 1642 + u16_encode_bits(dma_offset_sel, B_BE_TX_START_OFFSET_MASK); 1643 + } 1644 + 1731 1645 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1732 1646 { 1733 1647 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; ··· 1772 1616 struct rtw89_pci_rx_ring *rx_ring; 1773 1617 struct rtw89_pci_dma_ring *bd_ring; 1774 1618 const struct rtw89_pci_bd_ram *bd_ram; 1619 + dma_addr_t group_dma_base = 0; 1620 + u16 num_or_offset; 1621 + u32 addr_desa_l; 1622 + u32 addr_bdram; 1775 1623 u32 addr_num; 1776 1624 u32 addr_idx; 1777 - u32 addr_bdram; 1778 - u32 addr_desa_l; 1779 1625 u32 val32; 1780 1626 int i; 1781 1627 ··· 1785 1627 if (info->tx_dma_ch_mask & BIT(i)) 1786 1628 continue; 1787 1629 1788 - tx_ring = &rtwpci->tx_rings[i]; 1630 + tx_ring = &rtwpci->tx.rings[i]; 1789 1631 bd_ring = &tx_ring->bd_ring; 1790 1632 bd_ram = bd_ram_table ? 
&bd_ram_table[i] : NULL; 1791 1633 addr_num = bd_ring->addr.num; ··· 1794 1636 bd_ring->wp = 0; 1795 1637 bd_ring->rp = 0; 1796 1638 1797 - rtw89_write16(rtwdev, addr_num, bd_ring->len); 1639 + if (info->group_bd_addr) { 1640 + if (addr_desa_l) 1641 + group_dma_base = bd_ring->dma; 1642 + 1643 + num_or_offset = 1644 + rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len, 1645 + bd_ring->dma - group_dma_base); 1646 + } else { 1647 + num_or_offset = bd_ring->len; 1648 + } 1649 + rtw89_write16(rtwdev, addr_num, num_or_offset); 1650 + 1798 1651 if (addr_bdram && bd_ram) { 1799 1652 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1800 1653 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | ··· 1813 1644 1814 1645 rtw89_write32(rtwdev, addr_bdram, val32); 1815 1646 } 1816 - rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1817 - rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1647 + if (addr_desa_l) { 1648 + rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1649 + rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1650 + } 1818 1651 } 1819 1652 1820 1653 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1821 - rx_ring = &rtwpci->rx_rings[i]; 1654 + rx_ring = &rtwpci->rx.rings[i]; 1822 1655 bd_ring = &rx_ring->bd_ring; 1823 1656 addr_num = bd_ring->addr.num; 1824 1657 addr_idx = bd_ring->addr.idx; ··· 1834 1663 rx_ring->diliver_desc.ready = false; 1835 1664 rx_ring->target_rx_tag = 0; 1836 1665 1837 - rtw89_write16(rtwdev, addr_num, bd_ring->len); 1838 - rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1839 - rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1666 + if (info->group_bd_addr) { 1667 + if (addr_desa_l) 1668 + group_dma_base = bd_ring->dma; 1669 + 1670 + num_or_offset = 1671 + rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len, 1672 + bd_ring->dma - group_dma_base); 1673 + } else { 1674 + num_or_offset = bd_ring->len; 1675 + } 1676 + rtw89_write16(rtwdev, addr_num, num_or_offset); 1677 + 1678 + if (addr_desa_l) { 1679 + 
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1680 + rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1681 + } 1840 1682 1841 1683 if (info->rx_ring_eq_is_full) 1842 1684 rtw89_write16(rtwdev, addr_idx, bd_ring->wp); ··· 1882 1698 skb_queue_len(&rtwpci->h2c_queue), true); 1883 1699 continue; 1884 1700 } 1885 - rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1701 + rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx.rings[txch]); 1886 1702 } 1887 1703 spin_unlock_bh(&rtwpci->trx_lock); 1888 1704 } ··· 1958 1774 return; 1959 1775 1960 1776 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1961 - tx_ring = &rtwpci->tx_rings[i]; 1777 + tx_ring = &rtwpci->tx.rings[i]; 1962 1778 tx_ring->bd_ring.addr.idx = low_power ? 1963 1779 bd_idx_addr->tx_bd_addrs[i] : 1964 1780 dma_addr_set->tx[i].idx; 1965 1781 } 1966 1782 1967 1783 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1968 - rx_ring = &rtwpci->rx_rings[i]; 1784 + rx_ring = &rtwpci->rx.rings[i]; 1969 1785 rx_ring->bd_ring.addr.idx = low_power ? 
1970 1786 bd_idx_addr->rx_bd_addrs[i] : 1971 1787 dma_addr_set->rx[i].idx; ··· 2909 2725 2910 2726 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2911 2727 { 2912 - u32 ret; 2728 + int ret; 2913 2729 2914 2730 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); 2915 2731 if (ret) { ··· 3395 3211 struct pci_dev *pdev, 3396 3212 struct rtw89_pci_tx_ring *tx_ring) 3397 3213 { 3398 - int ring_sz; 3399 - u8 *head; 3400 - dma_addr_t dma; 3401 - 3402 - head = tx_ring->bd_ring.head; 3403 - dma = tx_ring->bd_ring.dma; 3404 - ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 3405 - dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3406 - 3407 3214 tx_ring->bd_ring.head = NULL; 3408 3215 } 3409 3216 ··· 3402 3227 struct pci_dev *pdev) 3403 3228 { 3404 3229 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3230 + struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool; 3405 3231 const struct rtw89_pci_info *info = rtwdev->pci_info; 3406 3232 struct rtw89_pci_tx_ring *tx_ring; 3407 3233 int i; ··· 3410 3234 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3411 3235 if (info->tx_dma_ch_mask & BIT(i)) 3412 3236 continue; 3413 - tx_ring = &rtwpci->tx_rings[i]; 3237 + tx_ring = &rtwpci->tx.rings[i]; 3414 3238 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3415 3239 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3416 3240 } 3241 + 3242 + dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3417 3243 } 3418 3244 3419 3245 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, ··· 3426 3248 struct sk_buff *skb; 3427 3249 dma_addr_t dma; 3428 3250 u32 buf_sz; 3429 - u8 *head; 3430 - int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 3431 3251 int i; 3432 3252 3433 3253 buf_sz = rx_ring->buf_sz; ··· 3441 3265 rx_ring->buf[i] = NULL; 3442 3266 } 3443 3267 3444 - head = rx_ring->bd_ring.head; 3445 - dma = rx_ring->bd_ring.dma; 3446 - dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3447 - 3448 3268 
rx_ring->bd_ring.head = NULL; 3449 3269 } 3450 3270 ··· 3448 3276 struct pci_dev *pdev) 3449 3277 { 3450 3278 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3279 + struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool; 3451 3280 struct rtw89_pci_rx_ring *rx_ring; 3452 3281 int i; 3453 3282 3454 3283 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3455 - rx_ring = &rtwpci->rx_rings[i]; 3284 + rx_ring = &rtwpci->rx.rings[i]; 3456 3285 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3457 3286 } 3287 + 3288 + dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3458 3289 } 3459 3290 3460 3291 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, ··· 3549 3374 struct pci_dev *pdev, 3550 3375 struct rtw89_pci_tx_ring *tx_ring, 3551 3376 u32 desc_size, u32 len, 3552 - enum rtw89_tx_channel txch) 3377 + enum rtw89_tx_channel txch, 3378 + void *head, dma_addr_t dma) 3553 3379 { 3554 3380 const struct rtw89_pci_ch_dma_addr *txch_addr; 3555 - int ring_sz = desc_size * len; 3556 - u8 *head; 3557 - dma_addr_t dma; 3558 3381 int ret; 3559 3382 3560 3383 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); ··· 3564 3391 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3565 3392 if (ret) { 3566 3393 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3567 - goto err_free_wd_ring; 3568 - } 3569 - 3570 - head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3571 - if (!head) { 3572 - ret = -ENOMEM; 3573 3394 goto err_free_wd_ring; 3574 3395 } 3575 3396 ··· 3589 3422 struct pci_dev *pdev) 3590 3423 { 3591 3424 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3425 + struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool; 3592 3426 const struct rtw89_pci_info *info = rtwdev->pci_info; 3593 3427 struct rtw89_pci_tx_ring *tx_ring; 3594 - u32 desc_size; 3595 - u32 len; 3596 3428 u32 i, tx_allocated; 3429 + dma_addr_t dma; 3430 + u32 desc_size; 3431 + u32 ring_sz; 3432 + u32 pool_sz; 3433 + u32 
ch_num; 3434 + void *head; 3435 + u32 len; 3597 3436 int ret; 3437 + 3438 + BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16); 3439 + 3440 + desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3441 + len = RTW89_PCI_TXBD_NUM_MAX; 3442 + ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask); 3443 + ring_sz = desc_size * len; 3444 + pool_sz = ring_sz * ch_num; 3445 + 3446 + head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL); 3447 + if (!head) 3448 + return -ENOMEM; 3449 + 3450 + bd_pool->head = head; 3451 + bd_pool->dma = dma; 3452 + bd_pool->size = pool_sz; 3598 3453 3599 3454 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3600 3455 if (info->tx_dma_ch_mask & BIT(i)) 3601 3456 continue; 3602 - tx_ring = &rtwpci->tx_rings[i]; 3603 - desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3604 - len = RTW89_PCI_TXBD_NUM_MAX; 3457 + tx_ring = &rtwpci->tx.rings[i]; 3605 3458 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3606 - desc_size, len, i); 3459 + desc_size, len, i, head, dma); 3607 3460 if (ret) { 3608 3461 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3609 3462 goto err_free; 3610 3463 } 3464 + 3465 + head += ring_sz; 3466 + dma += ring_sz; 3611 3467 } 3612 3468 3613 3469 return 0; ··· 3638 3448 err_free: 3639 3449 tx_allocated = i; 3640 3450 for (i = 0; i < tx_allocated; i++) { 3641 - tx_ring = &rtwpci->tx_rings[i]; 3451 + tx_ring = &rtwpci->tx.rings[i]; 3642 3452 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3643 3453 } 3454 + 3455 + dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3644 3456 3645 3457 return ret; 3646 3458 } ··· 3650 3458 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3651 3459 struct pci_dev *pdev, 3652 3460 struct rtw89_pci_rx_ring *rx_ring, 3653 - u32 desc_size, u32 len, u32 rxch) 3461 + u32 desc_size, u32 len, u32 rxch, 3462 + void *head, dma_addr_t dma) 3654 3463 { 3655 3464 const struct rtw89_pci_info *info = rtwdev->pci_info; 3656 3465 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3657 3466 
struct sk_buff *skb; 3658 - u8 *head; 3659 - dma_addr_t dma; 3660 - int ring_sz = desc_size * len; 3661 3467 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3662 3468 int i, allocated; 3663 3469 int ret; ··· 3664 3474 if (ret) { 3665 3475 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3666 3476 return ret; 3667 - } 3668 - 3669 - head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3670 - if (!head) { 3671 - ret = -ENOMEM; 3672 - goto err; 3673 3477 } 3674 3478 3675 3479 rx_ring->bd_ring.head = head; ··· 3714 3530 rx_ring->buf[i] = NULL; 3715 3531 } 3716 3532 3717 - head = rx_ring->bd_ring.head; 3718 - dma = rx_ring->bd_ring.dma; 3719 - dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3720 - 3721 3533 rx_ring->bd_ring.head = NULL; 3722 - err: 3534 + 3723 3535 return ret; 3724 3536 } 3725 3537 ··· 3723 3543 struct pci_dev *pdev) 3724 3544 { 3725 3545 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3546 + struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool; 3726 3547 struct rtw89_pci_rx_ring *rx_ring; 3727 - u32 desc_size; 3728 - u32 len; 3729 3548 int i, rx_allocated; 3549 + dma_addr_t dma; 3550 + u32 desc_size; 3551 + u32 ring_sz; 3552 + u32 pool_sz; 3553 + void *head; 3554 + u32 len; 3730 3555 int ret; 3731 3556 3557 + desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3558 + len = RTW89_PCI_RXBD_NUM_MAX; 3559 + ring_sz = desc_size * len; 3560 + pool_sz = ring_sz * RTW89_RXCH_NUM; 3561 + 3562 + head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL); 3563 + if (!head) 3564 + return -ENOMEM; 3565 + 3566 + bd_pool->head = head; 3567 + bd_pool->dma = dma; 3568 + bd_pool->size = pool_sz; 3569 + 3732 3570 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3733 - rx_ring = &rtwpci->rx_rings[i]; 3734 - desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3735 - len = RTW89_PCI_RXBD_NUM_MAX; 3571 + rx_ring = &rtwpci->rx.rings[i]; 3572 + 3736 3573 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3737 - desc_size, len, i); 3574 + desc_size, len, i, 
3575 + head, dma); 3738 3576 if (ret) { 3739 3577 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3740 3578 goto err_free; 3741 3579 } 3580 + 3581 + head += ring_sz; 3582 + dma += ring_sz; 3742 3583 } 3743 3584 3744 3585 return 0; ··· 3767 3566 err_free: 3768 3567 rx_allocated = i; 3769 3568 for (i = 0; i < rx_allocated; i++) { 3770 - rx_ring = &rtwpci->rx_rings[i]; 3569 + rx_ring = &rtwpci->rx.rings[i]; 3771 3570 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3772 3571 } 3572 + 3573 + dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3773 3574 3774 3575 return ret; 3775 3576 } ··· 3978 3775 rtw89_pci_default_intr_mask_v2(rtwdev); 3979 3776 } 3980 3777 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3778 + 3779 + static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev) 3780 + { 3781 + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3782 + 3783 + rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3784 + rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3785 + rtwpci->intrs[0] = 0; 3786 + rtwpci->intrs[1] = 0; 3787 + } 3788 + 3789 + static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev) 3790 + { 3791 + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3792 + 3793 + rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3794 + rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3795 + rtwpci->intrs[0] = 0; 3796 + rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR | 3797 + B_BE_PCIE_RDU_CH0_IMR | 3798 + B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3799 + B_BE_PCIE_RX_RPQ0_IMR0_V1; 3800 + } 3801 + 3802 + void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev) 3803 + { 3804 + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3805 + 3806 + if (rtwpci->under_recovery) 3807 + rtw89_pci_recovery_intr_mask_v3(rtwdev); 3808 + else 3809 + rtw89_pci_default_intr_mask_v3(rtwdev); 3810 + } 3811 + EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v3); 3981 3812 3982 3813 static int 
rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3983 3814 struct pci_dev *pdev) ··· 4395 4158 4396 4159 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) 4397 4160 { 4398 - u32 ret; 4161 + int ret; 4399 4162 4400 4163 if (rtwdev->chip->chip_id == RTL8852C) 4401 4164 return 0; ··· 4409 4172 return ret; 4410 4173 4411 4174 rtw89_pci_ctrl_dma_all(rtwdev, true); 4412 - return ret; 4175 + return 0; 4413 4176 } 4414 4177 4415 4178 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, ··· 4465 4228 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 4466 4229 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4467 4230 const struct rtw89_pci_info *info = rtwdev->pci_info; 4468 - const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4231 + const struct rtw89_pci_isr_def *isr_def = info->isr_def; 4469 4232 unsigned long flags; 4470 4233 int work_done; 4471 4234 4472 4235 rtwdev->napi_budget_countdown = budget; 4473 4236 4474 - rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data); 4237 + rtw89_write32(rtwdev, isr_def->isr_clear_rpq.addr, isr_def->isr_clear_rpq.data); 4475 4238 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4476 4239 if (work_done == budget) 4477 4240 return budget; 4478 4241 4479 - rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data); 4242 + rtw89_write32(rtwdev, isr_def->isr_clear_rxq.addr, isr_def->isr_clear_rxq.data); 4480 4243 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4481 4244 if (work_done < budget && napi_complete_done(napi, work_done)) { 4482 4245 spin_lock_irqsave(&rtwpci->irq_lock, flags); ··· 4631 4394 }; 4632 4395 EXPORT_SYMBOL(rtw89_pci_err_handler); 4633 4396 4634 - const struct rtw89_pci_gen_def rtw89_pci_gen_ax = { 4397 + const struct rtw89_pci_isr_def rtw89_pci_isr_ax = { 4635 4398 .isr_rdu = B_AX_RDU_INT, 4636 4399 .isr_halt_c2h = 
B_AX_HALT_C2H_INT_EN, 4637 4400 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN, 4638 4401 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT}, 4639 4402 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | 4640 4403 B_AX_RDU_INT}, 4404 + }; 4405 + EXPORT_SYMBOL(rtw89_pci_isr_ax); 4641 4406 4407 + const struct rtw89_pci_gen_def rtw89_pci_gen_ax = { 4642 4408 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax, 4643 4409 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax, 4644 4410 .mac_post_init = rtw89_pci_ops_mac_post_init_ax,
+120 -8
drivers/net/wireless/realtek/rtw89/pci.h
··· 372 372 #define B_BE_HS0ISR_IND_INT BIT(0) 373 373 374 374 #define R_BE_PCIE_DMA_IMR_0_V1 0x30B8 375 + #define B_BE_PCIE_RDU_CH7_IMR BIT(31) 376 + #define B_BE_PCIE_RDU_CH6_IMR BIT(30) 377 + #define B_BE_PCIE_RDU_CH5_IMR BIT(29) 378 + #define B_BE_PCIE_RDU_CH4_IMR BIT(28) 379 + #define B_BE_PCIE_RDU_CH3_IMR BIT(27) 380 + #define B_BE_PCIE_RDU_CH2_IMR BIT(26) 381 + #define B_BE_PCIE_RDU_CH1_IMR BIT(25) 382 + #define B_BE_PCIE_RDU_CH0_IMR BIT(24) 375 383 #define B_BE_PCIE_RX_RX1P1_IMR0_V1 BIT(23) 376 384 #define B_BE_PCIE_RX_RX0P1_IMR0_V1 BIT(22) 377 385 #define B_BE_PCIE_RX_ROQ1_IMR0_V1 BIT(21) ··· 405 397 #define B_BE_PCIE_TX_CH0_IMR0 BIT(0) 406 398 407 399 #define R_BE_PCIE_DMA_ISR 0x30BC 400 + #define B_BE_PCIE_RDU_CH7_INT BIT(31) 401 + #define B_BE_PCIE_RDU_CH6_INT BIT(30) 402 + #define B_BE_PCIE_RDU_CH5_INT BIT(29) 403 + #define B_BE_PCIE_RDU_CH4_INT BIT(28) 404 + #define B_BE_PCIE_RDU_CH3_INT BIT(27) 405 + #define B_BE_PCIE_RDU_CH2_INT BIT(26) 406 + #define B_BE_PCIE_RDU_CH1_INT BIT(25) 407 + #define B_BE_PCIE_RDU_CH0_INT BIT(24) 408 408 #define B_BE_PCIE_RX_RX1P1_ISR_V1 BIT(23) 409 409 #define B_BE_PCIE_RX_RX0P1_ISR_V1 BIT(22) 410 410 #define B_BE_PCIE_RX_ROQ1_ISR_V1 BIT(21) ··· 442 426 #define B_BE_RDU_CH4_INT_IMR_V1 BIT(29) 443 427 #define B_BE_RDU_CH3_INT_IMR_V1 BIT(28) 444 428 #define B_BE_RDU_CH2_INT_IMR_V1 BIT(27) 429 + #define B_BE_RDU_CH1_INT_EN_V2 BIT(27) 445 430 #define B_BE_RDU_CH1_INT_IMR_V1 BIT(26) 431 + #define B_BE_RDU_CH0_INT_EN_V2 BIT(26) 446 432 #define B_BE_RDU_CH0_INT_IMR_V1 BIT(25) 433 + #define B_BE_RXDMA_STUCK_INT_EN_V2 BIT(25) 447 434 #define B_BE_RXDMA_STUCK_INT_EN_V1 BIT(24) 435 + #define B_BE_TXDMA_STUCK_INT_EN_V2 BIT(24) 448 436 #define B_BE_TXDMA_STUCK_INT_EN_V1 BIT(23) 449 437 #define B_BE_TXDMA_CH14_INT_EN_V1 BIT(22) 450 438 #define B_BE_TXDMA_CH13_INT_EN_V1 BIT(21) ··· 479 459 #define B_BE_RDU_CH4_INT_V1 BIT(29) 480 460 #define B_BE_RDU_CH3_INT_V1 BIT(28) 481 461 #define B_BE_RDU_CH2_INT_V1 BIT(27) 462 + #define 
B_BE_RDU_CH1_INT_V2 BIT(27) 482 463 #define B_BE_RDU_CH1_INT_V1 BIT(26) 464 + #define B_BE_RDU_CH0_INT_V2 BIT(26) 483 465 #define B_BE_RDU_CH0_INT_V1 BIT(25) 466 + #define B_BE_RXDMA_STUCK_INT_V2 BIT(25) 484 467 #define B_BE_RXDMA_STUCK_INT_V1 BIT(24) 468 + #define B_BE_TXDMA_STUCK_INT_V2 BIT(24) 485 469 #define B_BE_TXDMA_STUCK_INT_V1 BIT(23) 486 470 #define B_BE_TXDMA_CH14_INT_V1 BIT(22) 487 471 #define B_BE_TXDMA_CH13_INT_V1 BIT(21) ··· 808 784 #define R_BE_CH13_TXBD_NUM_V1 0xB04C 809 785 #define R_BE_CH14_TXBD_NUM_V1 0xB04E 810 786 787 + #define R_BE_CH0_TXBD_CFG 0xB030 788 + #define R_BE_CH2_TXBD_CFG 0xB034 789 + #define R_BE_CH4_TXBD_CFG 0xB038 790 + #define R_BE_CH6_TXBD_CFG 0xB03C 791 + #define R_BE_CH8_TXBD_CFG 0xB040 792 + #define R_BE_CH10_TXBD_CFG 0xB044 793 + #define R_BE_CH12_TXBD_CFG 0xB048 794 + #define B_BE_TX_FLAG BIT(14) 795 + #define B_BE_TX_START_OFFSET_MASK GENMASK(12, 4) 796 + #define B_BE_TX_NUM_SEL_MASK GENMASK(2, 0) 797 + 811 798 #define R_BE_RXQ0_RXBD_NUM_V1 0xB050 812 799 #define R_BE_RPQ0_RXBD_NUM_V1 0xB052 800 + 801 + #define R_BE_RX_CH0_RXBD_CONFIG 0xB050 802 + #define R_BE_RX_CH1_RXBD_CONFIG 0xB052 803 + #define B_BE_RX_START_OFFSET_MASK GENMASK(11, 4) 804 + #define B_BE_RX_NUM_SEL_MASK GENMASK(2, 0) 813 805 814 806 #define R_BE_CH0_TXBD_IDX_V1 0xB100 815 807 #define R_BE_CH1_TXBD_IDX_V1 0xB104 ··· 877 837 #define R_BE_CH14_TXBD_DESA_L_V1 0xB270 878 838 #define R_BE_CH14_TXBD_DESA_H_V1 0xB274 879 839 840 + #define R_BE_ACQ_TXBD_DESA_L 0xB200 841 + #define B_BE_TX_ACQ_DESA_L_MASK GENMASK(31, 3) 842 + #define R_BE_ACQ_TXBD_DESA_H 0xB204 843 + #define B_BE_TX_ACQ_DESA_H_MASK GENMASK(7, 0) 844 + #define R_BE_NACQ_TXBD_DESA_L 0xB240 845 + #define B_BE_TX_NACQ_DESA_L_MASK GENMASK(31, 3) 846 + #define R_BE_NACQ_TXBD_DESA_H 0xB244 847 + #define B_BE_TX_NACQ_DESA_H_MASK GENMASK(7, 0) 848 + 880 849 #define R_BE_RXQ0_RXBD_DESA_L_V1 0xB300 881 850 #define R_BE_RXQ0_RXBD_DESA_H_V1 0xB304 882 851 #define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308 883 852 
#define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C 853 + 854 + #define R_BE_HOST0_RXBD_DESA_L 0xB300 855 + #define B_BE_RX_HOST0_DESA_L_MASK GENMASK(31, 3) 856 + #define R_BE_HOST0_RXBD_DESA_H 0xB304 857 + #define B_BE_RX_HOST0_DESA_H_MASK GENMASK(7, 0) 884 858 885 859 #define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420 886 860 #define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424 ··· 1303 1249 }; 1304 1250 1305 1251 struct rtw89_pci_ch_dma_addr { 1306 - u32 num; 1252 + u32 num; /* also `offset` addr for group_bd_addr design */ 1307 1253 u32 idx; 1308 1254 u32 bdram; 1309 1255 u32 desa_l; ··· 1321 1267 u8 min_num; 1322 1268 }; 1323 1269 1324 - struct rtw89_pci_gen_def { 1270 + struct rtw89_pci_isr_def { 1325 1271 u32 isr_rdu; 1326 1272 u32 isr_halt_c2h; 1327 1273 u32 isr_wdt_timeout; 1328 1274 struct rtw89_reg2_def isr_clear_rpq; 1329 1275 struct rtw89_reg2_def isr_clear_rxq; 1276 + }; 1330 1277 1278 + struct rtw89_pci_gen_def { 1331 1279 int (*mac_pre_init)(struct rtw89_dev *rtwdev); 1332 1280 int (*mac_pre_deinit)(struct rtw89_dev *rtwdev); 1333 1281 int (*mac_post_init)(struct rtw89_dev *rtwdev); ··· 1365 1309 unsigned long bitmap; /* bitmap of rtw89_quirks */ 1366 1310 }; 1367 1311 1312 + struct rtw89_pci_rpp_info { 1313 + u16 seq; 1314 + u8 qsel; 1315 + u8 tx_status; 1316 + u8 txch; 1317 + }; 1318 + 1368 1319 struct rtw89_pci_info { 1369 1320 const struct rtw89_pci_gen_def *gen_def; 1321 + const struct rtw89_pci_isr_def *isr_def; 1370 1322 enum mac_ax_bd_trunc_mode txbd_trunc_mode; 1371 1323 enum mac_ax_bd_trunc_mode rxbd_trunc_mode; 1372 1324 enum mac_ax_rxbd_mode rxbd_mode; ··· 1392 1328 bool rx_ring_eq_is_full; 1393 1329 bool check_rx_tag; 1394 1330 bool no_rxbd_fs; 1331 + bool group_bd_addr; 1332 + u32 rpp_fmt_size; 1395 1333 1396 1334 u32 init_cfg_reg; 1397 1335 u32 txhci_en_bit; ··· 1423 1357 u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev, 1424 1358 void *txaddr_info_addr, u32 total_len, 1425 1359 dma_addr_t dma, u8 *add_info_nr); 1360 + void (*parse_rpp)(struct rtw89_dev *rtwdev, void 
*rpp, 1361 + struct rtw89_pci_rpp_info *rpp_info); 1426 1362 void (*config_intr_mask)(struct rtw89_dev *rtwdev); 1427 1363 void (*enable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1428 1364 void (*disable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); ··· 1498 1430 __le32 dword; 1499 1431 } __packed; 1500 1432 1433 + #define RTW89_PCI_RPP_W0_MACID_V1_MASK GENMASK(9, 0) 1434 + #define RTW89_PCI_RPP_W0_DMA_CH_MASK GENMASK(13, 10) 1435 + #define RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK GENMASK(16, 14) 1436 + #define RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK GENMASK(31, 17) 1437 + #define RTW89_PCI_RPP_W1_QSEL_V1_MASK GENMASK(5, 0) 1438 + #define RTW89_PCI_RPP_W1_TID_IND BIT(6) 1439 + #define RTW89_PCI_RPP_W1_CHANGE_LINK BIT(7) 1440 + 1441 + struct rtw89_pci_rpp_fmt_v1 { 1442 + __le32 w0; 1443 + __le32 w1; 1444 + } __packed; 1445 + 1501 1446 struct rtw89_pci_rx_bd_32 { 1502 1447 __le16 buf_size; 1503 1448 __le16 opt; ··· 1549 1468 u32 rp; /* hw idx */ 1550 1469 }; 1551 1470 1471 + struct rtw89_pci_dma_pool { 1472 + void *head; 1473 + dma_addr_t dma; 1474 + u32 size; 1475 + }; 1476 + 1552 1477 struct rtw89_pci_tx_wd_ring { 1553 1478 void *head; 1554 1479 dma_addr_t dma; ··· 1584 1497 u64 tx_mac_id_drop; 1585 1498 }; 1586 1499 1500 + struct rtw89_pci_tx_rings { 1501 + struct rtw89_pci_tx_ring rings[RTW89_TXCH_NUM]; 1502 + struct rtw89_pci_dma_pool bd_pool; 1503 + }; 1504 + 1587 1505 struct rtw89_pci_rx_ring { 1588 1506 struct rtw89_pci_dma_ring bd_ring; 1589 1507 struct sk_buff *buf[RTW89_PCI_RXBD_NUM_MAX]; ··· 1596 1504 struct sk_buff *diliver_skb; 1597 1505 struct rtw89_rx_desc_info diliver_desc; 1598 1506 u32 target_rx_tag:13; 1507 + }; 1508 + 1509 + struct rtw89_pci_rx_rings { 1510 + struct rtw89_pci_rx_ring rings[RTW89_RXCH_NUM]; 1511 + struct rtw89_pci_dma_pool bd_pool; 1599 1512 }; 1600 1513 1601 1514 struct rtw89_pci_isrs { ··· 1620 1523 bool low_power; 1621 1524 bool under_recovery; 1622 1525 bool enable_dac; 1623 - struct rtw89_pci_tx_ring 
tx_rings[RTW89_TXCH_NUM]; 1624 - struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM]; 1526 + struct rtw89_pci_tx_rings tx; 1527 + struct rtw89_pci_rx_rings rx; 1625 1528 struct sk_buff_head h2c_queue; 1626 1529 struct sk_buff_head h2c_release_queue; 1627 1530 DECLARE_BITMAP(kick_map, RTW89_TXCH_NUM); ··· 1634 1537 1635 1538 static inline struct rtw89_pci_rx_info *RTW89_PCI_RX_SKB_CB(struct sk_buff *skb) 1636 1539 { 1637 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1638 - 1639 - BUILD_BUG_ON(sizeof(struct rtw89_pci_tx_data) > 1640 - sizeof(info->status.status_driver_data)); 1540 + BUILD_BUG_ON(sizeof(struct rtw89_pci_rx_info) > sizeof(skb->cb)); 1641 1541 1642 1542 return (struct rtw89_pci_rx_info *)skb->cb; 1643 1543 } ··· 1664 1570 static inline struct rtw89_pci_tx_data *RTW89_PCI_TX_SKB_CB(struct sk_buff *skb) 1665 1571 { 1666 1572 struct rtw89_tx_skb_data *data = RTW89_TX_SKB_CB(skb); 1573 + 1574 + BUILD_BUG_ON(sizeof(struct rtw89_tx_skb_data) + 1575 + sizeof(struct rtw89_pci_tx_data) > 1576 + sizeof_field(struct ieee80211_tx_info, driver_data)); 1667 1577 1668 1578 return (struct rtw89_pci_tx_data *)data->hci_priv; 1669 1579 } ··· 1724 1626 extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set; 1725 1627 extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1; 1726 1628 extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be; 1629 + extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1; 1727 1630 extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM]; 1728 1631 extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM]; 1632 + extern const struct rtw89_pci_isr_def rtw89_pci_isr_ax; 1633 + extern const struct rtw89_pci_isr_def rtw89_pci_isr_be; 1634 + extern const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1; 1729 1635 extern const struct rtw89_pci_gen_def rtw89_pci_gen_ax; 1730 1636 extern const struct rtw89_pci_gen_def 
rtw89_pci_gen_be; 1731 1637 ··· 1748 1646 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1749 1647 void *txaddr_info_addr, u32 total_len, 1750 1648 dma_addr_t dma, u8 *add_info_nr); 1649 + void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp, 1650 + struct rtw89_pci_rpp_info *rpp_info); 1651 + void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp, 1652 + struct rtw89_pci_rpp_info *rpp_info); 1751 1653 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable); 1752 1654 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev); 1753 1655 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev); 1754 1656 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev); 1657 + void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev); 1755 1658 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1756 1659 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1757 1660 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1758 1661 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1759 1662 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1760 1663 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1664 + void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1665 + void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); 1761 1666 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 1762 1667 struct rtw89_pci *rtwpci, 1763 1668 struct rtw89_pci_isrs *isrs); ··· 1772 1663 struct rtw89_pci *rtwpci, 1773 1664 struct rtw89_pci_isrs *isrs); 1774 1665 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev, 1666 + struct rtw89_pci *rtwpci, 1667 + struct rtw89_pci_isrs *isrs); 1668 + void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev, 1775 1669 struct rtw89_pci *rtwpci, 1776 1670 
struct rtw89_pci_isrs *isrs); 1777 1671
+15 -3
drivers/net/wireless/realtek/rtw89/pci_be.c
··· 175 175 rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1, 176 176 B_BE_CLR_RXQ0_IDX | B_BE_CLR_RPQ0_IDX); 177 177 178 - rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 178 + rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ]; 179 179 rtw89_write16(rtwdev, R_BE_RXQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1); 180 180 181 - rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 181 + rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ]; 182 182 rtw89_write16(rtwdev, R_BE_RPQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1); 183 183 } 184 184 ··· 665 665 SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be); 666 666 EXPORT_SYMBOL(rtw89_pm_ops_be); 667 667 668 - const struct rtw89_pci_gen_def rtw89_pci_gen_be = { 668 + const struct rtw89_pci_isr_def rtw89_pci_isr_be = { 669 669 .isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1, 670 670 .isr_halt_c2h = B_BE_HALT_C2H_INT, 671 671 .isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT, 672 672 .isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1}, 673 673 .isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1}, 674 + }; 675 + EXPORT_SYMBOL(rtw89_pci_isr_be); 674 676 677 + const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1 = { 678 + .isr_rdu = B_BE_PCIE_RDU_CH1_INT | B_BE_PCIE_RDU_CH0_INT, 679 + .isr_halt_c2h = B_BE_HALT_C2H_INT, 680 + .isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT, 681 + .isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1}, 682 + .isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1}, 683 + }; 684 + EXPORT_SYMBOL(rtw89_pci_isr_be_v1); 685 + 686 + const struct rtw89_pci_gen_def rtw89_pci_gen_be = { 675 687 .mac_pre_init = rtw89_pci_ops_mac_pre_init_be, 676 688 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_be, 677 689 .mac_post_init = rtw89_pci_ops_mac_post_init_be,
+440 -36
drivers/net/wireless/realtek/rtw89/phy.c
··· 1702 1702 rtw89_phy_bb_reset(rtwdev); 1703 1703 } 1704 1704 1705 + void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev) 1706 + { 1707 + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1708 + const struct rtw89_fw_element_hdr *afe_elm = elm_info->afe; 1709 + const struct rtw89_phy_afe_info *info; 1710 + u32 action, cat, class; 1711 + u32 addr, mask, val; 1712 + u32 poll, rpt; 1713 + u32 n, i; 1714 + 1715 + if (!afe_elm) 1716 + return; 1717 + 1718 + n = le32_to_cpu(afe_elm->size) / sizeof(*info); 1719 + 1720 + for (i = 0; i < n; i++) { 1721 + info = &afe_elm->u.afe.infos[i]; 1722 + 1723 + class = le32_to_cpu(info->class); 1724 + switch (class) { 1725 + case RTW89_FW_AFE_CLASS_P0: 1726 + case RTW89_FW_AFE_CLASS_P1: 1727 + case RTW89_FW_AFE_CLASS_CMN: 1728 + /* Currently support two paths */ 1729 + break; 1730 + case RTW89_FW_AFE_CLASS_P2: 1731 + case RTW89_FW_AFE_CLASS_P3: 1732 + case RTW89_FW_AFE_CLASS_P4: 1733 + default: 1734 + rtw89_warn(rtwdev, "unexpected AFE class %u\n", class); 1735 + continue; 1736 + } 1737 + 1738 + addr = le32_to_cpu(info->addr); 1739 + mask = le32_to_cpu(info->mask); 1740 + val = le32_to_cpu(info->val); 1741 + cat = le32_to_cpu(info->cat); 1742 + action = le32_to_cpu(info->action); 1743 + 1744 + switch (action) { 1745 + case RTW89_FW_AFE_ACTION_WRITE: 1746 + switch (cat) { 1747 + case RTW89_FW_AFE_CAT_MAC: 1748 + case RTW89_FW_AFE_CAT_MAC1: 1749 + rtw89_write32_mask(rtwdev, addr, mask, val); 1750 + break; 1751 + case RTW89_FW_AFE_CAT_AFEDIG: 1752 + case RTW89_FW_AFE_CAT_AFEDIG1: 1753 + rtw89_write32_mask(rtwdev, addr, mask, val); 1754 + break; 1755 + case RTW89_FW_AFE_CAT_BB: 1756 + rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0); 1757 + break; 1758 + case RTW89_FW_AFE_CAT_BB1: 1759 + rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1); 1760 + break; 1761 + default: 1762 + rtw89_warn(rtwdev, 1763 + "unexpected AFE writing action %u\n", action); 1764 + break; 1765 + } 1766 + break; 1767 + case 
RTW89_FW_AFE_ACTION_POLL: 1768 + for (poll = 0; poll <= 10; poll++) { 1769 + /* 1770 + * For CAT_BB, AFE reads register with mcu_offset 0, 1771 + * so both CAT_MAC and CAT_BB use the same method. 1772 + */ 1773 + rpt = rtw89_read32_mask(rtwdev, addr, mask); 1774 + if (rpt == val) 1775 + goto poll_done; 1776 + 1777 + fsleep(1); 1778 + } 1779 + rtw89_warn(rtwdev, "failed to poll AFE cat=%u addr=0x%x mask=0x%x\n", 1780 + cat, addr, mask); 1781 + poll_done: 1782 + break; 1783 + case RTW89_FW_AFE_ACTION_DELAY: 1784 + fsleep(addr); 1785 + break; 1786 + } 1787 + } 1788 + } 1789 + 1705 1790 static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev) 1706 1791 { 1707 1792 rtw89_phy_write32(rtwdev, 0x8080, 0x4); ··· 3028 2943 } 3029 2944 3030 2945 if (mode == RTW89_RA_RPT_MODE_LEGACY) { 3031 - valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate); 2946 + valid = rtw89_legacy_rate_to_bitrate(rtwdev, rate, &legacy_bitrate); 3032 2947 if (!valid) 3033 2948 return; 3034 2949 } ··· 3171 3086 [RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL, 3172 3087 [RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt, 3173 3088 }; 3089 + 3090 + static 3091 + void rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev *rtwdev, 3092 + const struct rtw89_c2h_rf_tas_rpt_log *content) 3093 + { 3094 + const enum rtw89_sar_sources src = rtwdev->sar.src; 3095 + struct rtw89_tas_info *tas = &rtwdev->tas; 3096 + u64 linear = 0; 3097 + u32 i, cur_idx; 3098 + s16 txpwr; 3099 + 3100 + if (!tas->enable || src == RTW89_SAR_SOURCE_NONE) 3101 + return; 3102 + 3103 + cur_idx = le32_to_cpu(content->cur_idx); 3104 + for (i = 0; i < cur_idx; i++) { 3105 + txpwr = le16_to_cpu(content->txpwr_history[i]); 3106 + linear += rtw89_db_quarter_to_linear(txpwr); 3107 + 3108 + rtw89_debug(rtwdev, RTW89_DBG_SAR, 3109 + "tas: index: %u, txpwr: %d\n", i, txpwr); 3110 + } 3111 + 3112 + if (cur_idx == 0) 3113 + tas->instant_txpwr = rtw89_db_to_linear(0); 3114 + else 3115 + tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx); 3116 
+ } 3174 3117 3175 3118 static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev, 3176 3119 enum rtw89_phy_c2h_rfk_log_func func, ··· 3451 3338 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n", 3452 3339 (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]); 3453 3340 return; 3341 + case RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR: 3342 + if (len != sizeof(struct rtw89_c2h_rf_tas_rpt_log)) 3343 + goto out; 3344 + 3345 + rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content); 3346 + 3347 + return; 3454 3348 default: 3455 3349 break; 3456 3350 } ··· 3509 3389 u16 content_len; 3510 3390 u16 chunk_len; 3511 3391 bool handled; 3512 - 3513 - if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK)) 3514 - return; 3515 3392 3516 3393 log_ptr += sizeof(*c2h_hdr); 3517 3394 len -= sizeof(*c2h_hdr); ··· 3586 3469 RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK"); 3587 3470 } 3588 3471 3472 + static void 3473 + rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3474 + { 3475 + rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 3476 + RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS"); 3477 + } 3478 + 3589 3479 static 3590 3480 void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev, 3591 3481 struct sk_buff *c2h, u32 len) = { ··· 3602 3478 [RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck, 3603 3479 [RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi, 3604 3480 [RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk, 3481 + [RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr, 3605 3482 }; 3606 3483 3607 3484 static ··· 3665 3540 } 3666 3541 3667 3542 static void 3668 - rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3543 + rtw89_phy_c2h_rfk_report_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3669 3544 { 3670 - const struct rtw89_c2h_rf_tas_info *rf_tas = 3545 + const struct rtw89_c2h_rf_tas_info *report = 3671 3546 (const struct 
rtw89_c2h_rf_tas_info *)c2h->data; 3672 - const enum rtw89_sar_sources src = rtwdev->sar.src; 3673 - struct rtw89_tas_info *tas = &rtwdev->tas; 3674 - u64 linear = 0; 3675 - u32 i, cur_idx; 3676 - s16 txpwr; 3677 3547 3678 - if (!tas->enable || src == RTW89_SAR_SOURCE_NONE) 3679 - return; 3680 - 3681 - cur_idx = le32_to_cpu(rf_tas->cur_idx); 3682 - for (i = 0; i < cur_idx; i++) { 3683 - txpwr = (s16)le16_to_cpu(rf_tas->txpwr_history[i]); 3684 - linear += rtw89_db_quarter_to_linear(txpwr); 3685 - 3686 - rtw89_debug(rtwdev, RTW89_DBG_SAR, 3687 - "tas: index: %u, txpwr: %d\n", i, txpwr); 3688 - } 3689 - 3690 - if (cur_idx == 0) 3691 - tas->instant_txpwr = rtw89_db_to_linear(0); 3692 - else 3693 - tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx); 3548 + rtw89_phy_c2h_rfk_tas_pwr(rtwdev, &report->content); 3694 3549 } 3695 3550 3696 3551 static 3697 3552 void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev, 3698 3553 struct sk_buff *c2h, u32 len) = { 3699 3554 [RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state, 3700 - [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr, 3555 + [RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_report_tas_pwr, 3701 3556 }; 3702 3557 3703 3558 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func) ··· 3731 3626 handler = rtw89_phy_c2h_dm_handler[func]; 3732 3627 break; 3733 3628 default: 3734 - rtw89_info(rtwdev, "PHY c2h class %d not support\n", class); 3735 - return; 3629 + break; 3736 3630 } 3737 3631 if (!handler) { 3738 - rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class, 3739 - func); 3632 + rtw89_info_once(rtwdev, "PHY c2h class %d func %d not support\n", 3633 + class, func); 3740 3634 return; 3741 3635 } 3742 3636 handler(rtwdev, skb, len); ··· 5601 5497 i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]); 5602 5498 } 5603 5499 5500 + static void __rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev, 5501 + struct 
rtw89_bb_ctx *bb) 5502 + { 5503 + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5504 + struct rtw89_env_monitor_info *env = &bb->env_monitor; 5505 + const struct rtw89_ccx_regs *ccx = phy->ccx; 5506 + 5507 + env->nhm_include_cca = false; 5508 + env->nhm_mntr_time = 0; 5509 + env->nhm_sum = 0; 5510 + 5511 + rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, ccx->nhm_en_mask, bb->phy_idx); 5512 + rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_method, ccx->nhm_pwr_method_msk, 5513 + bb->phy_idx); 5514 + } 5515 + 5516 + void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev) 5517 + { 5518 + const struct rtw89_chip_info *chip = rtwdev->chip; 5519 + struct rtw89_bb_ctx *bb; 5520 + 5521 + if (!chip->support_noise) 5522 + return; 5523 + 5524 + rtw89_for_each_active_bb(rtwdev, bb) 5525 + __rtw89_phy_nhm_setting_init(rtwdev, bb); 5526 + } 5527 + 5604 5528 static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev, 5605 5529 struct rtw89_bb_ctx *bb) 5606 5530 { ··· 5690 5558 } 5691 5559 5692 5560 static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev, 5693 - struct rtw89_bb_ctx *bb) 5561 + struct rtw89_bb_ctx *bb, u8 sel) 5694 5562 { 5695 5563 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5696 5564 struct rtw89_env_monitor_info *env = &bb->env_monitor; ··· 5700 5568 bb->phy_idx); 5701 5569 rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0, 5702 5570 bb->phy_idx); 5571 + if (sel & RTW89_PHY_ENV_MON_NHM) 5572 + rtw89_phy_write32_idx_clr(rtwdev, ccx->nhm_config, 5573 + ccx->nhm_en_mask, bb->phy_idx); 5574 + 5703 5575 rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1, 5704 5576 bb->phy_idx); 5705 5577 rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1, 5706 5578 bb->phy_idx); 5579 + if (sel & RTW89_PHY_ENV_MON_NHM) 5580 + rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, 5581 + ccx->nhm_en_mask, bb->phy_idx); 5707 5582 5708 5583 env->ccx_ongoing = true; 
5709 5584 } ··· 5779 5640 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n", 5780 5641 i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i], 5781 5642 env->ifs_clm_cca_avg[i]); 5643 + } 5644 + 5645 + static u8 rtw89_nhm_weighted_avg(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) 5646 + { 5647 + struct rtw89_env_monitor_info *env = &bb->env_monitor; 5648 + u8 nhm_weight[RTW89_NHM_RPT_NUM]; 5649 + u32 nhm_weighted_sum = 0; 5650 + u8 weight_zero; 5651 + u8 i; 5652 + 5653 + if (env->nhm_sum == 0) 5654 + return 0; 5655 + 5656 + weight_zero = clamp_t(u16, env->nhm_th[0] - RTW89_NHM_WEIGHT_OFFSET, 0, U8_MAX); 5657 + 5658 + for (i = 0; i < RTW89_NHM_RPT_NUM; i++) { 5659 + if (i == 0) 5660 + nhm_weight[i] = weight_zero; 5661 + else if (i == (RTW89_NHM_RPT_NUM - 1)) 5662 + nhm_weight[i] = env->nhm_th[i - 1] + RTW89_NHM_WEIGHT_OFFSET; 5663 + else 5664 + nhm_weight[i] = (env->nhm_th[i - 1] + env->nhm_th[i]) / 2; 5665 + } 5666 + 5667 + if (rtwdev->chip->chip_id == RTL8852A || rtwdev->chip->chip_id == RTL8852B || 5668 + rtwdev->chip->chip_id == RTL8852C) { 5669 + if (env->nhm_th[RTW89_NHM_TH_NUM - 1] == RTW89_NHM_WA_TH) { 5670 + nhm_weight[RTW89_NHM_RPT_NUM - 1] = 5671 + env->nhm_th[RTW89_NHM_TH_NUM - 2] + 5672 + RTW89_NHM_WEIGHT_OFFSET; 5673 + nhm_weight[RTW89_NHM_RPT_NUM - 2] = 5674 + nhm_weight[RTW89_NHM_RPT_NUM - 1]; 5675 + } 5676 + 5677 + env->nhm_result[0] += env->nhm_result[RTW89_NHM_RPT_NUM - 1]; 5678 + env->nhm_result[RTW89_NHM_RPT_NUM - 1] = 0; 5679 + } 5680 + 5681 + for (i = 0; i < RTW89_NHM_RPT_NUM; i++) 5682 + nhm_weighted_sum += env->nhm_result[i] * nhm_weight[i]; 5683 + 5684 + return (nhm_weighted_sum / env->nhm_sum) >> RTW89_NHM_TH_FACTOR; 5685 + } 5686 + 5687 + static void __rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, 5688 + struct rtw89_bb_ctx *bb, enum rtw89_band hw_band, 5689 + u16 ch_hw_value) 5690 + { 5691 + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5692 + struct rtw89_env_monitor_info *env = &bb->env_monitor; 5693 + 
const struct rtw89_chip_info *chip = rtwdev->chip; 5694 + const struct rtw89_ccx_regs *ccx = phy->ccx; 5695 + struct ieee80211_supported_band *sband; 5696 + const struct rtw89_reg_def *nhm_rpt; 5697 + enum nl80211_band band; 5698 + u32 sum = 0; 5699 + u8 chan_idx; 5700 + u8 nhm_pwr; 5701 + u8 i; 5702 + 5703 + if (!rtw89_phy_read32_idx(rtwdev, ccx->nhm, ccx->nhm_ready, bb->phy_idx)) { 5704 + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] Get NHM report Fail\n"); 5705 + return; 5706 + } 5707 + 5708 + for (i = 0; i < RTW89_NHM_RPT_NUM; i++) { 5709 + nhm_rpt = &(*chip->nhm_report)[i]; 5710 + 5711 + env->nhm_result[i] = 5712 + rtw89_phy_read32_idx(rtwdev, nhm_rpt->addr, 5713 + nhm_rpt->mask, bb->phy_idx); 5714 + sum += env->nhm_result[i]; 5715 + } 5716 + env->nhm_sum = sum; 5717 + nhm_pwr = rtw89_nhm_weighted_avg(rtwdev, bb); 5718 + 5719 + if (!ch_hw_value) 5720 + return; 5721 + 5722 + band = rtw89_hw_to_nl80211_band(hw_band); 5723 + sband = rtwdev->hw->wiphy->bands[band]; 5724 + if (!sband) 5725 + return; 5726 + 5727 + for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) { 5728 + struct ieee80211_channel *channel; 5729 + struct rtw89_nhm_report *rpt; 5730 + struct list_head *nhm_list; 5731 + 5732 + channel = &sband->channels[chan_idx]; 5733 + if (channel->hw_value != ch_hw_value) 5734 + continue; 5735 + 5736 + rpt = &env->nhm_his[hw_band][chan_idx]; 5737 + nhm_list = &env->nhm_rpt_list; 5738 + 5739 + rpt->channel = channel; 5740 + rpt->noise = nhm_pwr; 5741 + 5742 + if (list_empty(&rpt->list)) 5743 + list_add_tail(&rpt->list, nhm_list); 5744 + 5745 + return; 5746 + } 5747 + 5748 + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] channel not found\n"); 5749 + } 5750 + 5751 + void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band, 5752 + u16 ch_hw_value) 5753 + { 5754 + const struct rtw89_chip_info *chip = rtwdev->chip; 5755 + struct rtw89_bb_ctx *bb; 5756 + 5757 + if (!chip->support_noise) 5758 + return; 5759 + 5760 + 
rtw89_for_each_active_bb(rtwdev, bb) 5761 + __rtw89_phy_nhm_get_result(rtwdev, bb, hw_band, ch_hw_value); 5782 5762 } 5783 5763 5784 5764 static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev, ··· 6000 5742 return true; 6001 5743 } 6002 5744 5745 + static void rtw89_phy_nhm_th_update(struct rtw89_dev *rtwdev, 5746 + struct rtw89_bb_ctx *bb) 5747 + { 5748 + struct rtw89_env_monitor_info *env = &bb->env_monitor; 5749 + static const u8 nhm_th_11k[RTW89_NHM_RPT_NUM] = { 5750 + 18, 21, 24, 27, 30, 35, 40, 45, 50, 55, 60, 0 5751 + }; 5752 + const struct rtw89_chip_info *chip = rtwdev->chip; 5753 + const struct rtw89_reg_def *nhm_th; 5754 + u8 i; 5755 + 5756 + for (i = 0; i < RTW89_NHM_RPT_NUM; i++) 5757 + env->nhm_th[i] = nhm_th_11k[i] << RTW89_NHM_TH_FACTOR; 5758 + 5759 + if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || 5760 + chip->chip_id == RTL8852C) 5761 + env->nhm_th[RTW89_NHM_TH_NUM - 1] = RTW89_NHM_WA_TH; 5762 + 5763 + for (i = 0; i < RTW89_NHM_TH_NUM; i++) { 5764 + nhm_th = &(*chip->nhm_th)[i]; 5765 + 5766 + rtw89_phy_write32_idx(rtwdev, nhm_th->addr, nhm_th->mask, 5767 + env->nhm_th[i], bb->phy_idx); 5768 + } 5769 + } 5770 + 5771 + static int rtw89_phy_nhm_set(struct rtw89_dev *rtwdev, 5772 + struct rtw89_bb_ctx *bb, 5773 + struct rtw89_ccx_para_info *para) 5774 + { 5775 + const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5776 + struct rtw89_env_monitor_info *env = &bb->env_monitor; 5777 + const struct rtw89_ccx_regs *ccx = phy->ccx; 5778 + u32 unit_idx = 0; 5779 + u32 period = 0; 5780 + 5781 + if (para->mntr_time == 0) { 5782 + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5783 + "[NHM] MNTR_TIME is 0\n"); 5784 + return -EINVAL; 5785 + } 5786 + 5787 + if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv)) 5788 + return -EINVAL; 5789 + 5790 + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5791 + "[NHM]nhm_incld_cca=%d, mntr_time=%d ms\n", 5792 + para->nhm_incld_cca, para->mntr_time); 5793 + 5794 + if (para->mntr_time != 
env->nhm_mntr_time) { 5795 + rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time, 5796 + &period, &unit_idx); 5797 + rtw89_phy_write32_idx(rtwdev, ccx->nhm_config, 5798 + ccx->nhm_period_mask, period, bb->phy_idx); 5799 + rtw89_phy_write32_idx(rtwdev, ccx->nhm_config, 5800 + ccx->nhm_unit_mask, period, bb->phy_idx); 5801 + 5802 + env->nhm_mntr_time = para->mntr_time; 5803 + env->ccx_period = period; 5804 + env->ccx_unit_idx = unit_idx; 5805 + } 5806 + 5807 + if (para->nhm_incld_cca != env->nhm_include_cca) { 5808 + rtw89_phy_write32_idx(rtwdev, ccx->nhm_config, 5809 + ccx->nhm_include_cca_mask, para->nhm_incld_cca, 5810 + bb->phy_idx); 5811 + 5812 + env->nhm_include_cca = para->nhm_incld_cca; 5813 + } 5814 + 5815 + rtw89_phy_nhm_th_update(rtwdev, bb); 5816 + 5817 + return 0; 5818 + } 5819 + 5820 + static void __rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) 5821 + { 5822 + struct rtw89_ccx_para_info para = { 5823 + .mntr_time = RTW89_NHM_MNTR_TIME, 5824 + .rac_lv = RTW89_RAC_LV_1, 5825 + .nhm_incld_cca = true, 5826 + }; 5827 + 5828 + rtw89_phy_ccx_racing_release(rtwdev, bb); 5829 + 5830 + rtw89_phy_nhm_set(rtwdev, bb, &para); 5831 + rtw89_phy_ccx_trigger(rtwdev, bb, RTW89_PHY_ENV_MON_NHM); 5832 + } 5833 + 5834 + void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev) 5835 + { 5836 + const struct rtw89_chip_info *chip = rtwdev->chip; 5837 + struct rtw89_bb_ctx *bb; 5838 + 5839 + if (!chip->support_noise) 5840 + return; 5841 + 5842 + rtw89_for_each_active_bb(rtwdev, bb) 5843 + __rtw89_phy_nhm_trigger(rtwdev, bb); 5844 + } 5845 + 6003 5846 static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, 6004 5847 struct rtw89_bb_ctx *bb, 6005 5848 struct rtw89_ccx_para_info *para) ··· 6175 5816 if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0) 6176 5817 chk_result |= RTW89_PHY_ENV_MON_IFS_CLM; 6177 5818 if (chk_result) 6178 - rtw89_phy_ccx_trigger(rtwdev, bb); 5819 + rtw89_phy_ccx_trigger(rtwdev, bb, chk_result); 6179 5820 6180 5821 
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 6181 5822 "get_result=0x%x, chk_result:0x%x\n", ··· 6289 5930 val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) | 6290 5931 BIT(RTW89_PHYSTS_IE01_CMN_OFDM); 6291 5932 } else if (i >= RTW89_CCK_PKT) { 6292 - val |= BIT(RTW89_PHYSTS_IE09_FTR_0); 6293 - 6294 5933 val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D, 6295 5934 RTW89_PHYSTS_IE04_CMN_EXT_PATH_A)); 6296 5935 ··· 7267 6910 rtw89_chip_bb_sethw(rtwdev); 7268 6911 7269 6912 rtw89_phy_env_monitor_init(rtwdev); 6913 + rtw89_phy_nhm_setting_init(rtwdev); 7270 6914 rtw89_physts_parsing_init(rtwdev); 7271 6915 rtw89_phy_dig_init(rtwdev); 7272 6916 rtw89_phy_cfo_init(rtwdev); ··· 7291 6933 { 7292 6934 rtw89_phy_env_monitor_init(rtwdev); 7293 6935 rtw89_physts_parsing_init(rtwdev); 6936 + } 6937 + 6938 + static void __rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) 6939 + { 6940 + struct rtw89_env_monitor_info *env = &bb->env_monitor; 6941 + const struct rtw89_chip_info *chip = rtwdev->chip; 6942 + struct ieee80211_supported_band *sband; 6943 + enum rtw89_band hw_band; 6944 + enum nl80211_band band; 6945 + u8 idx; 6946 + 6947 + if (!chip->support_noise) 6948 + return; 6949 + 6950 + for (band = 0; band < NUM_NL80211_BANDS; band++) { 6951 + sband = rtwdev->hw->wiphy->bands[band]; 6952 + if (!sband) 6953 + continue; 6954 + 6955 + hw_band = rtw89_nl80211_to_hw_band(band); 6956 + env->nhm_his[hw_band] = 6957 + devm_kcalloc(rtwdev->dev, sband->n_channels, 6958 + sizeof(*env->nhm_his[0]), GFP_KERNEL); 6959 + 6960 + for (idx = 0; idx < sband->n_channels; idx++) 6961 + INIT_LIST_HEAD(&env->nhm_his[hw_band][idx].list); 6962 + 6963 + INIT_LIST_HEAD(&env->nhm_rpt_list); 6964 + } 6965 + } 6966 + 6967 + void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev) 6968 + { 6969 + struct rtw89_bb_ctx *bb; 6970 + 6971 + rtw89_for_each_capab_bb(rtwdev, bb) 6972 + __rtw89_phy_dm_init_data(rtwdev, bb); 7294 6973 } 7295 6974 7296 6975 void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, 
··· 7985 7590 .ifs_total_addr = R_IFSCNT, 7986 7591 .ifs_cnt_done_mask = B_IFSCNT_DONE_MSK, 7987 7592 .ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK, 7593 + .nhm = R_NHM_AX, 7594 + .nhm_ready = B_NHM_READY_MSK, 7595 + .nhm_config = R_NHM_CFG, 7596 + .nhm_period_mask = B_NHM_PERIOD_MSK, 7597 + .nhm_unit_mask = B_NHM_COUNTER_MSK, 7598 + .nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK, 7599 + .nhm_en_mask = B_NHM_EN_MSK, 7600 + .nhm_method = R_NHM_TH9, 7601 + .nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK, 7988 7602 }; 7989 7603 7990 7604 static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
+23 -1
drivers/net/wireless/realtek/rtw89/phy.h
··· 149 149 RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK = 3, 150 150 RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI = 4, 151 151 RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK = 5, 152 + RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR = 9, 152 153 153 154 RTW89_PHY_C2H_RFK_LOG_FUNC_NUM, 154 155 }; 155 156 156 157 enum rtw89_phy_c2h_rfk_report_func { 157 158 RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0, 158 - RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6, 159 + RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR = 6, 159 160 }; 160 161 161 162 enum rtw89_phy_c2h_dm_func { ··· 188 187 RTW89_PHY_ENV_MON_IFS_CLM = BIT(3), 189 188 RTW89_PHY_ENV_MON_EDCCA_CLM = BIT(4), 190 189 }; 190 + 191 + #define RTW89_NHM_WEIGHT_OFFSET 2 192 + #define RTW89_NHM_WA_TH (109 << 1) 193 + #define RTW89_NOISE_DEFAULT -96 194 + #define RTW89_NHM_MNTR_TIME 40 195 + #define RTW89_NHM_TH_FACTOR 1 191 196 192 197 #define CCX_US_BASE_RATIO 4 193 198 enum rtw89_ccx_unit { ··· 435 428 u32 ifs_total_addr; 436 429 u32 ifs_cnt_done_mask; 437 430 u32 ifs_total_mask; 431 + u32 nhm; 432 + u32 nhm_ready; 433 + u32 nhm_config; 434 + u32 nhm_period_mask; 435 + u32 nhm_unit_mask; 436 + u32 nhm_include_cca_mask; 437 + u32 nhm_en_mask; 438 + u32 nhm_method; 439 + u32 nhm_pwr_method_msk; 438 440 }; 439 441 440 442 struct rtw89_physts_regs { ··· 830 814 bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 831 815 u32 addr, u32 mask, u32 data); 832 816 void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev); 817 + void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev); 833 818 void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio); 834 819 void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev, 835 820 const struct rtw89_reg2_def *reg, ··· 838 821 void *extra_data); 839 822 void rtw89_phy_dm_init(struct rtw89_dev *rtwdev); 840 823 void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev); 824 + void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev); 841 825 void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, 842 826 u32 data, enum rtw89_phy_idx 
phy_idx); 843 827 void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits, ··· 1056 1038 u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev, 1057 1039 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr, 1058 1040 const struct rtw89_chan *target_chan); 1041 + void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev); 1042 + void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band, 1043 + u16 ch_hw_value); 1044 + void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev); 1059 1045 1060 1046 #endif
+9
drivers/net/wireless/realtek/rtw89/phy_be.c
··· 63 63 .ifs_total_addr = R_IFSCNT_V1, 64 64 .ifs_cnt_done_mask = B_IFSCNT_DONE_MSK, 65 65 .ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK, 66 + .nhm = R_NHM_BE, 67 + .nhm_ready = B_NHM_READY_BE_MSK, 68 + .nhm_config = R_NHM_CFG, 69 + .nhm_period_mask = B_NHM_PERIOD_MSK, 70 + .nhm_unit_mask = B_NHM_COUNTER_MSK, 71 + .nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK, 72 + .nhm_en_mask = B_NHM_EN_MSK, 73 + .nhm_method = R_NHM_TH9, 74 + .nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK, 66 75 }; 67 76 68 77 static const struct rtw89_physts_regs rtw89_physts_regs_be = {
+3
drivers/net/wireless/realtek/rtw89/ps.c
··· 119 119 120 120 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL); 121 121 rtw89_fw_h2c_lps_parm(rtwdev, &lps_param); 122 + 123 + if (RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw)) 124 + rtw89_fw_h2c_pwr_lvl(rtwdev, rtwvif_link); 122 125 } 123 126 124 127 static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
+56
drivers/net/wireless/realtek/rtw89/reg.h
··· 3370 3370 #define B_AX_CSIPRT_HESU_AID_EN BIT(25) 3371 3371 #define B_AX_CSIPRT_VHTSU_AID_EN BIT(24) 3372 3372 3373 + #define R_AX_BCN_PSR_RPT_P0 0xCE84 3374 + #define R_AX_BCN_PSR_RPT_P0_C1 0xEE84 3375 + #define B_AX_BCAID_P0_MASK GENMASK(10, 0) 3376 + 3373 3377 #define R_AX_RX_STATE_MONITOR 0xCEF0 3374 3378 #define R_AX_RX_STATE_MONITOR_C1 0xEEF0 3375 3379 #define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0) ··· 6262 6258 #define B_BE_PTCL_TOP_ERR_IND BIT(1) 6263 6259 #define B_BE_SCHEDULE_TOP_ERR_IND BIT(0) 6264 6260 6261 + #define R_BE_CMAC_FW_TRIGGER_IDCT_ISR 0x10168 6262 + #define R_BE_CMAC_FW_TRIGGER_IDCT_ISR_C1 0x14168 6263 + #define B_BE_CMAC_FW_ERR_IDCT_IMR BIT(31) 6264 + #define B_BE_CMAC_FW_TRIG_IDCT BIT(0) 6265 + 6265 6266 #define R_BE_SER_L0_DBG_CNT 0x10170 6266 6267 #define R_BE_SER_L0_DBG_CNT_C1 0x14170 6267 6268 #define B_BE_SER_L0_PHYINTF_CNT_MASK GENMASK(31, 24) ··· 7503 7494 #define R_BE_DRV_INFO_OPTION_C1 0x15470 7504 7495 #define B_BE_DRV_INFO_PHYRPT_EN BIT(0) 7505 7496 7497 + #define R_BE_BCN_PSR_RPT_P0 0x11484 7498 + #define R_BE_BCN_PSR_RPT_P0_C1 0x15484 7499 + #define B_BE_BCAID_P0_MASK GENMASK(10, 0) 7500 + 7506 7501 #define R_BE_RX_ERR_ISR 0x114F4 7507 7502 #define R_BE_RX_ERR_ISR_C1 0x154F4 7508 7503 #define B_BE_RX_ERR_TRIG_ACT_TO BIT(9) ··· 8105 8092 #define B_MEASUREMENT_TRIG_MSK BIT(2) 8106 8093 #define B_CCX_TRIG_OPT_MSK BIT(1) 8107 8094 #define B_CCX_EN_MSK BIT(0) 8095 + #define R_NHM_CFG 0x0C08 8096 + #define B_NHM_PERIOD_MSK GENMASK(15, 0) 8097 + #define B_NHM_COUNTER_MSK GENMASK(17, 16) 8098 + #define B_NHM_EN_MSK BIT(18) 8099 + #define B_NHM_INCLUDE_CCA_MSK BIT(19) 8100 + #define B_NHM_TH0_MSK GENMASK(31, 24) 8101 + #define R_NHM_TH1 0x0C0C 8102 + #define B_NHM_TH1_MSK GENMASK(7, 0) 8103 + #define B_NHM_TH2_MSK GENMASK(15, 8) 8104 + #define B_NHM_TH3_MSK GENMASK(23, 16) 8105 + #define B_NHM_TH4_MSK GENMASK(31, 24) 8106 + #define R_NHM_TH5 0x0C10 8107 + #define B_NHM_TH5_MSK GENMASK(7, 0) 8108 + #define B_NHM_TH6_MSK 
GENMASK(15, 8) 8109 + #define B_NHM_TH7_MSK GENMASK(23, 16) 8110 + #define B_NHM_TH8_MSK GENMASK(31, 24) 8111 + #define R_NHM_TH9 0x0C14 8112 + #define B_NHM_TH9_MSK GENMASK(7, 0) 8113 + #define B_NHM_TH10_MSK GENMASK(15, 8) 8114 + #define B_NHM_PWDB_METHOD_MSK GENMASK(17, 16) 8108 8115 #define R_FAHM 0x0C1C 8109 8116 #define B_RXTD_CKEN BIT(2) 8110 8117 #define R_IFS_COUNTER 0x0C28 ··· 8194 8161 #define R_BRK_ASYNC_RST_EN_1 0x0DC0 8195 8162 #define R_BRK_ASYNC_RST_EN_2 0x0DC4 8196 8163 #define R_BRK_ASYNC_RST_EN_3 0x0DC8 8164 + #define R_NHM_BE 0x0EA4 8165 + #define B_NHM_READY_BE_MSK BIT(16) 8197 8166 #define R_CTLTOP 0x1008 8198 8167 #define B_CTLTOP_ON BIT(23) 8199 8168 #define B_CTLTOP_VAL GENMASK(15, 12) ··· 8251 8216 #define B_SWSI_R_BUSY_V1 BIT(25) 8252 8217 #define B_SWSI_R_DATA_DONE_V1 BIT(26) 8253 8218 #define R_TX_COUNTER 0x1A40 8219 + #define R_NHM_CNT0 0x1A88 8220 + #define B_NHM_CNT0_MSK GENMASK(15, 0) 8221 + #define B_NHM_CNT1_MSK GENMASK(31, 16) 8222 + #define R_NHM_CNT2 0x1A8C 8223 + #define B_NHM_CNT2_MSK GENMASK(15, 0) 8224 + #define B_NHM_CNT3_MSK GENMASK(31, 16) 8225 + #define R_NHM_CNT4 0x1A90 8226 + #define B_NHM_CNT4_MSK GENMASK(15, 0) 8227 + #define B_NHM_CNT5_MSK GENMASK(31, 16) 8228 + #define R_NHM_CNT6 0x1A94 8229 + #define B_NHM_CNT6_MSK GENMASK(15, 0) 8230 + #define B_NHM_CNT7_MSK GENMASK(31, 16) 8231 + #define R_NHM_CNT8 0x1A98 8232 + #define B_NHM_CNT8_MSK GENMASK(15, 0) 8233 + #define B_NHM_CNT9_MSK GENMASK(31, 16) 8234 + #define R_NHM_CNT10 0x1A9C 8235 + #define B_NHM_CNT10_MSK GENMASK(15, 0) 8236 + #define B_NHM_CNT11_MSK GENMASK(31, 16) 8237 + #define R_NHM_AX 0x1AA4 8238 + #define B_NHM_READY_MSK BIT(16) 8254 8239 #define R_IFS_CLM_TX_CNT 0x1ACC 8255 8240 #define R_IFS_CLM_TX_CNT_V1 0x0ECC 8256 8241 #define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16) ··· 9181 9126 #define B_COEF_SEL_MDPD BIT(8) 9182 9127 #define B_COEF_SEL_MDPD_V1 GENMASK(9, 8) 9183 9128 #define B_COEF_SEL_EN BIT(31) 9129 + #define R_CFIR_COEF 0x810c 
9184 9130 #define R_CFIR_SYS 0x8120 9185 9131 #define R_IQK_RES 0x8124 9186 9132 #define B_IQK_RES_K BIT(28)
+4
drivers/net/wireless/realtek/rtw89/rtw8851b.c
··· 2537 2537 .query_rxdesc = rtw89_core_query_rxdesc, 2538 2538 .fill_txdesc = rtw89_core_fill_txdesc, 2539 2539 .fill_txdesc_fwcmd = rtw89_core_fill_txdesc, 2540 + .get_ch_dma = rtw89_core_get_ch_dma, 2540 2541 .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path, 2541 2542 .mac_cfg_gnt = rtw89_mac_cfg_gnt, 2542 2543 .stop_sch_tx = rtw89_mac_stop_sch_tx, ··· 2629 2628 .support_ant_gain = false, 2630 2629 .support_tas = false, 2631 2630 .support_sar_by_ant = false, 2631 + .support_noise = false, 2632 2632 .ul_tb_waveform_ctrl = true, 2633 2633 .ul_tb_pwr_diff = false, 2634 2634 .rx_freq_frome_ie = true, ··· 2691 2689 .cfo_hw_comp = true, 2692 2690 .dcfo_comp = &rtw8851b_dcfo_comp, 2693 2691 .dcfo_comp_sft = 12, 2692 + .nhm_report = NULL, 2693 + .nhm_th = NULL, 2694 2694 .imr_info = &rtw8851b_imr_info, 2695 2695 .imr_dmac_table = NULL, 2696 2696 .imr_cmac_table = NULL,
+95 -62
drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
··· 17 17 #define DPK_RF_REG_NUM_8851B 4 18 18 #define DPK_KSET_NUM 4 19 19 #define RTW8851B_RXK_GROUP_NR 4 20 - #define RTW8851B_RXK_GROUP_IDX_NR 2 21 - #define RTW8851B_TXK_GROUP_NR 1 20 + #define RTW8851B_RXK_GROUP_IDX_NR 4 21 + #define RTW8851B_A_TXK_GROUP_NR 2 22 + #define RTW8851B_G_TXK_GROUP_NR 1 22 23 #define RTW8851B_IQK_VER 0x14 23 24 #define RTW8851B_IQK_SS 1 24 25 #define RTW8851B_LOK_GRAM 10 ··· 115 114 static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296}; 116 115 static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf}; 117 116 static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3}; 118 - static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x28c}; 119 - static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf}; 120 - static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x6}; 121 - static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0}; 122 - static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6}; 123 - static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a}; 124 - static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12}; 125 - static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0}; 126 - static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6}; 127 - static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10}; 128 - static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12}; 117 + static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x112, 0x28c, 0x292}; 118 + static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf, 0xf, 0xf}; 119 + static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x5, 0x6, 0x7}; 120 + static const u32 a_power_range[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x0}; 121 + static const u32 a_track_range[RTW8851B_A_TXK_GROUP_NR] = {0x7, 0x7}; 122 + static const u32 a_gain_bb[RTW8851B_A_TXK_GROUP_NR] = {0x08, 0x0d}; 123 + static const u32 a_itqt[RTW8851B_A_TXK_GROUP_NR] = {0x12, 0x12}; 124 
+ static const u32 a_att_smxr[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x2}; 125 + static const u32 g_power_range[RTW8851B_G_TXK_GROUP_NR] = {0x0}; 126 + static const u32 g_track_range[RTW8851B_G_TXK_GROUP_NR] = {0x6}; 127 + static const u32 g_gain_bb[RTW8851B_G_TXK_GROUP_NR] = {0x10}; 128 + static const u32 g_itqt[RTW8851B_G_TXK_GROUP_NR] = {0x12}; 129 129 130 - static const u32 rtw8851b_backup_bb_regs[] = {0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8}; 130 + static const u32 rtw8851b_backup_bb_regs[] = { 131 + 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8, 0x12a0, 0xc0f0}; 131 132 static const u32 rtw8851b_backup_rf_regs[] = { 132 133 0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5}; 133 134 ··· 141 138 static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005}; 142 139 143 140 static void _set_ch(struct rtw89_dev *rtwdev, u32 val); 144 - 145 - static u8 _rxk_5ghz_group_from_idx(u8 idx) 146 - { 147 - /* There are four RXK groups (RTW8851B_RXK_GROUP_NR), but only group 0 148 - * and 2 are used in 5 GHz band, so reduce elements to 2. 
149 - */ 150 - if (idx < RTW8851B_RXK_GROUP_IDX_NR) 151 - return idx * 2; 152 - 153 - return 0; 154 - } 155 141 156 142 static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 157 143 { ··· 188 196 static void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, 189 197 bool force, enum adc_ck ck) 190 198 { 191 - static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x93}; 199 + static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x92}; 192 200 static const u32 ck1920_8851b[] = {0x9, 0x0, 0x0, 0x3, 0xf, 0xa, 0x49}; 193 201 const u32 *data; 194 202 ··· 792 800 "[IQK]============ S%d ID_NBTXK ============\n", path); 793 801 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); 794 802 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 795 - 0x00b); 803 + 0x11); 796 804 iqk_cmd = 0x408 | (1 << (4 + path)); 797 805 break; 798 806 case ID_NBRXK: ··· 810 818 rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); 811 819 notready = _iqk_check_cal(rtwdev, path); 812 820 if (iqk_info->iqk_sram_en && 813 - (ktype == ID_NBRXK || ktype == ID_RXK)) 821 + (ktype == ID_NBRXK || ktype == ID_RXK || ktype == ID_NBTXK)) 814 822 _iqk_sram(rtwdev, path); 815 823 816 824 rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0); ··· 897 905 bool kfail = false; 898 906 bool notready; 899 907 u32 rf_0; 900 - u8 idx; 908 + u32 val; 901 909 u8 gp; 902 910 903 911 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 904 912 905 - for (idx = 0; idx < RTW8851B_RXK_GROUP_IDX_NR; idx++) { 906 - gp = _rxk_5ghz_group_from_idx(idx); 913 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 914 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 915 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17); 916 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 917 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 918 + rtw89_write_rf(rtwdev, 
RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 907 919 920 + val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20); 921 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc); 922 + 923 + for (gp = 0; gp < RTW8851B_RXK_GROUP_IDX_NR; gp++) { 908 924 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 909 925 910 - rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]); 911 - rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]); 926 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]); 927 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]); 928 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1); 912 929 913 930 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 914 931 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); ··· 927 926 fsleep(100); 928 927 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 929 928 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 930 - rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]); 929 + rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); 931 930 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 932 931 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 933 932 ··· 960 959 _iqk_sram(rtwdev, path); 961 960 962 961 if (kfail) { 962 + rtw89_phy_write32_mask(rtwdev, R_IQK_RES, B_IQK_RES_RXCFIR, 0x0); 963 963 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 964 964 iqk_info->nb_rxcfir[path] | 0x2); 965 965 iqk_info->is_wb_txiqk[path] = false; ··· 969 967 0x40000000); 970 968 iqk_info->is_wb_txiqk[path] = true; 971 969 } 970 + 971 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val); 972 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 973 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 974 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37); 975 + 
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 976 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 977 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 972 978 973 979 rtw89_debug(rtwdev, RTW89_DBG_RFK, 974 980 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, ··· 990 980 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 991 981 bool kfail = false; 992 982 bool notready; 993 - u8 idx = 0x1; 983 + u8 gp = 2; 994 984 u32 rf_0; 995 - u8 gp; 996 - 997 - gp = _rxk_5ghz_group_from_idx(idx); 985 + u32 val; 998 986 999 987 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1000 988 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp); 1001 989 1002 - rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]); 1003 - rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]); 990 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 991 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 992 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17); 993 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 994 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 995 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 996 + 997 + val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20); 998 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc); 999 + 1000 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]); 1001 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]); 1002 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1); 1004 1003 1005 1004 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1006 1005 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0); ··· 1019 1000 fsleep(100); 1020 1001 rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); 1021 1002 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0); 1022 - 
rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]); 1003 + rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); 1023 1004 rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); 1024 1005 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); 1025 1006 ··· 1045 1026 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); 1046 1027 1047 1028 if (kfail) { 1029 + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 0xf, 0x0); 1048 1030 rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), 1049 1031 MASKDWORD, 0x40000002); 1050 1032 iqk_info->is_wb_rxiqk[path] = false; 1051 1033 } else { 1052 1034 iqk_info->is_wb_rxiqk[path] = false; 1053 1035 } 1036 + 1037 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val); 1038 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000); 1039 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4); 1040 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37); 1041 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5); 1042 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27); 1043 + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0); 1054 1044 1055 1045 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1056 1046 "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail, ··· 1177 1149 static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev, 1178 1150 enum rtw89_phy_idx phy_idx, u8 path) 1179 1151 { 1152 + static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3}; 1180 1153 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1181 1154 bool kfail = false; 1182 1155 bool notready; ··· 1185 1156 1186 1157 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1187 1158 1188 - for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1159 + rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x33332222); 1160 + 1161 + for (gp = 0x0; gp < RTW8851B_A_TXK_GROUP_NR; gp++) { 1189 1162 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 
a_power_range[gp]); 1190 1163 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1191 1164 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1165 + rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]); 1192 1166 1193 1167 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1194 1168 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1195 1169 rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1196 - rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1170 + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]); 1197 1171 rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1172 + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x11); 1198 1173 rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1199 1174 1200 1175 notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); ··· 1239 1206 1240 1207 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1241 1208 1242 - for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1209 + rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x0); 1210 + 1211 + for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) { 1243 1212 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1244 1213 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1245 1214 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); ··· 1284 1249 static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 1285 1250 u8 path) 1286 1251 { 1252 + static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3}; 1287 1253 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1288 1254 bool kfail = false; 1289 1255 bool notready; 1290 - u8 gp; 1256 + u8 gp = 0; 1291 1257 1292 1258 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1293 1259 1294 - for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1295 - rtw89_write_rf(rtwdev, path, RR_TXIG, 
RR_TXIG_GR0, a_power_range[gp]); 1296 - rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1297 - rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1260 + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); 1261 + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); 1262 + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); 1263 + rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]); 1298 1264 1299 - rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1300 - rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1301 - rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1302 - rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp); 1303 - rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1304 - rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1265 + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1); 1266 + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1); 1267 + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0); 1268 + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]); 1269 + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); 1270 + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]); 1305 1271 1306 - notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1307 - iqk_info->nb_txcfir[path] = 1308 - rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1309 - } 1272 + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); 1273 + iqk_info->nb_txcfir[path] = 1274 + rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2; 1310 1275 1311 1276 if (!notready) 1312 1277 kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); ··· 1335 1300 1336 1301 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); 1337 1302 1338 - for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) { 1303 + for (gp = 
0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) { 1339 1304 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]); 1340 1305 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]); 1341 1306 rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]); ··· 1698 1663 { 1699 1664 struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; 1700 1665 u8 idx, path; 1701 - 1702 - rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0); 1703 1666 1704 1667 if (iqk_info->is_iqk_init) 1705 1668 return;
+4
drivers/net/wireless/realtek/rtw89/rtw8851be.c
··· 11 11 12 12 static const struct rtw89_pci_info rtw8851b_pci_info = { 13 13 .gen_def = &rtw89_pci_gen_ax, 14 + .isr_def = &rtw89_pci_isr_ax, 14 15 .txbd_trunc_mode = MAC_AX_BD_TRUNC, 15 16 .rxbd_trunc_mode = MAC_AX_BD_TRUNC, 16 17 .rxbd_mode = MAC_AX_RXBD_PKT, ··· 29 28 .rx_ring_eq_is_full = false, 30 29 .check_rx_tag = false, 31 30 .no_rxbd_fs = false, 31 + .group_bd_addr = false, 32 + .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt), 32 33 33 34 .init_cfg_reg = R_AX_PCIE_INIT_CFG1, 34 35 .txhci_en_bit = B_AX_TXHCI_EN, ··· 60 57 61 58 .ltr_set = rtw89_pci_ltr_set, 62 59 .fill_txaddr_info = rtw89_pci_fill_txaddr_info, 60 + .parse_rpp = rtw89_pci_parse_rpp, 63 61 .config_intr_mask = rtw89_pci_config_intr_mask, 64 62 .enable_intr = rtw89_pci_enable_intr, 65 63 .disable_intr = rtw89_pci_disable_intr,
+3
drivers/net/wireless/realtek/rtw89/rtw8851bu.c
··· 16 16 static const struct usb_device_id rtw_8851bu_id_table[] = { 17 17 { USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb851, 0xff, 0xff, 0xff), 18 18 .driver_info = (kernel_ulong_t)&rtw89_8851bu_info }, 19 + /* D-Link AX9U rev. A1 */ 20 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x332a, 0xff, 0xff, 0xff), 21 + .driver_info = (kernel_ulong_t)&rtw89_8851bu_info }, 19 22 /* TP-Link Archer TX10UB Nano */ 20 23 { USB_DEVICE_AND_INTERFACE_INFO(0x3625, 0x010b, 0xff, 0xff, 0xff), 21 24 .driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
+43 -3
drivers/net/wireless/realtek/rtw89/rtw8852a.c
··· 426 426 R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK 427 427 }; 428 428 429 + static const struct rtw89_reg_def rtw8852a_nhm_th[RTW89_NHM_TH_NUM] = { 430 + {R_NHM_CFG, B_NHM_TH0_MSK}, 431 + {R_NHM_TH1, B_NHM_TH1_MSK}, 432 + {R_NHM_TH1, B_NHM_TH2_MSK}, 433 + {R_NHM_TH1, B_NHM_TH3_MSK}, 434 + {R_NHM_TH1, B_NHM_TH4_MSK}, 435 + {R_NHM_TH5, B_NHM_TH5_MSK}, 436 + {R_NHM_TH5, B_NHM_TH6_MSK}, 437 + {R_NHM_TH5, B_NHM_TH7_MSK}, 438 + {R_NHM_TH5, B_NHM_TH8_MSK}, 439 + {R_NHM_TH9, B_NHM_TH9_MSK}, 440 + {R_NHM_TH9, B_NHM_TH10_MSK}, 441 + }; 442 + 443 + static const struct rtw89_reg_def rtw8852a_nhm_rpt[RTW89_NHM_RPT_NUM] = { 444 + {R_NHM_CNT0, B_NHM_CNT0_MSK}, 445 + {R_NHM_CNT0, B_NHM_CNT1_MSK}, 446 + {R_NHM_CNT2, B_NHM_CNT2_MSK}, 447 + {R_NHM_CNT2, B_NHM_CNT3_MSK}, 448 + {R_NHM_CNT4, B_NHM_CNT4_MSK}, 449 + {R_NHM_CNT4, B_NHM_CNT5_MSK}, 450 + {R_NHM_CNT6, B_NHM_CNT6_MSK}, 451 + {R_NHM_CNT6, B_NHM_CNT7_MSK}, 452 + {R_NHM_CNT8, B_NHM_CNT8_MSK}, 453 + {R_NHM_CNT8, B_NHM_CNT9_MSK}, 454 + {R_NHM_CNT10, B_NHM_CNT10_MSK}, 455 + {R_NHM_CNT10, B_NHM_CNT11_MSK}, 456 + }; 457 + 429 458 static const struct rtw89_imr_info rtw8852a_imr_info = { 430 459 .wdrls_imr_set = B_AX_WDRLS_IMR_SET, 431 460 .wsec_imr_reg = R_AX_SEC_DEBUG, ··· 2109 2080 { 2110 2081 u8 path; 2111 2082 u8 *rx_power = phy_ppdu->rssi; 2083 + u8 raw; 2112 2084 2113 - if (!status->signal) 2114 - status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], 2115 - rx_power[RF_PATH_B])); 2085 + if (!status->signal) { 2086 + if (phy_ppdu->to_self) 2087 + raw = ewma_rssi_read(&rtwdev->phystat.bcn_rssi); 2088 + else 2089 + raw = max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]); 2090 + 2091 + status->signal = RTW89_RSSI_RAW_TO_DBM(raw); 2092 + } 2093 + 2116 2094 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 2117 2095 status->chains |= BIT(path); 2118 2096 status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); ··· 2178 2142 .query_rxdesc = rtw89_core_query_rxdesc, 2179 2143 .fill_txdesc = rtw89_core_fill_txdesc, 
2180 2144 .fill_txdesc_fwcmd = rtw89_core_fill_txdesc, 2145 + .get_ch_dma = rtw89_core_get_ch_dma, 2181 2146 .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path, 2182 2147 .mac_cfg_gnt = rtw89_mac_cfg_gnt, 2183 2148 .stop_sch_tx = rtw89_mac_stop_sch_tx, ··· 2257 2220 .support_ant_gain = false, 2258 2221 .support_tas = false, 2259 2222 .support_sar_by_ant = false, 2223 + .support_noise = true, 2260 2224 .ul_tb_waveform_ctrl = false, 2261 2225 .ul_tb_pwr_diff = false, 2262 2226 .rx_freq_frome_ie = true, ··· 2320 2282 .cfo_hw_comp = false, 2321 2283 .dcfo_comp = &rtw8852a_dcfo_comp, 2322 2284 .dcfo_comp_sft = 10, 2285 + .nhm_report = &rtw8852a_nhm_rpt, 2286 + .nhm_th = &rtw8852a_nhm_th, 2323 2287 .imr_info = &rtw8852a_imr_info, 2324 2288 .imr_dmac_table = NULL, 2325 2289 .imr_cmac_table = NULL,
+4
drivers/net/wireless/realtek/rtw89/rtw8852ae.c
··· 11 11 12 12 static const struct rtw89_pci_info rtw8852a_pci_info = { 13 13 .gen_def = &rtw89_pci_gen_ax, 14 + .isr_def = &rtw89_pci_isr_ax, 14 15 .txbd_trunc_mode = MAC_AX_BD_TRUNC, 15 16 .rxbd_trunc_mode = MAC_AX_BD_TRUNC, 16 17 .rxbd_mode = MAC_AX_RXBD_PKT, ··· 29 28 .rx_ring_eq_is_full = false, 30 29 .check_rx_tag = false, 31 30 .no_rxbd_fs = false, 31 + .group_bd_addr = false, 32 + .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt), 32 33 33 34 .init_cfg_reg = R_AX_PCIE_INIT_CFG1, 34 35 .txhci_en_bit = B_AX_TXHCI_EN, ··· 58 55 59 56 .ltr_set = rtw89_pci_ltr_set, 60 57 .fill_txaddr_info = rtw89_pci_fill_txaddr_info, 58 + .parse_rpp = rtw89_pci_parse_rpp, 61 59 .config_intr_mask = rtw89_pci_config_intr_mask, 62 60 .enable_intr = rtw89_pci_enable_intr, 63 61 .disable_intr = rtw89_pci_disable_intr,
+4
drivers/net/wireless/realtek/rtw89/rtw8852b.c
··· 842 842 .query_rxdesc = rtw89_core_query_rxdesc, 843 843 .fill_txdesc = rtw89_core_fill_txdesc, 844 844 .fill_txdesc_fwcmd = rtw89_core_fill_txdesc, 845 + .get_ch_dma = rtw89_core_get_ch_dma, 845 846 .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path, 846 847 .mac_cfg_gnt = rtw89_mac_cfg_gnt, 847 848 .stop_sch_tx = rtw89_mac_stop_sch_tx, ··· 940 939 .support_ant_gain = true, 941 940 .support_tas = false, 942 941 .support_sar_by_ant = true, 942 + .support_noise = false, 943 943 .ul_tb_waveform_ctrl = true, 944 944 .ul_tb_pwr_diff = false, 945 945 .rx_freq_frome_ie = true, ··· 1003 1001 .cfo_hw_comp = true, 1004 1002 .dcfo_comp = &rtw8852b_dcfo_comp, 1005 1003 .dcfo_comp_sft = 10, 1004 + .nhm_report = NULL, 1005 + .nhm_th = NULL, 1006 1006 .imr_info = &rtw8852b_imr_info, 1007 1007 .imr_dmac_table = NULL, 1008 1008 .imr_cmac_table = NULL,
+4
drivers/net/wireless/realtek/rtw89/rtw8852be.c
··· 11 11 12 12 static const struct rtw89_pci_info rtw8852b_pci_info = { 13 13 .gen_def = &rtw89_pci_gen_ax, 14 + .isr_def = &rtw89_pci_isr_ax, 14 15 .txbd_trunc_mode = MAC_AX_BD_TRUNC, 15 16 .rxbd_trunc_mode = MAC_AX_BD_TRUNC, 16 17 .rxbd_mode = MAC_AX_RXBD_PKT, ··· 29 28 .rx_ring_eq_is_full = false, 30 29 .check_rx_tag = false, 31 30 .no_rxbd_fs = false, 31 + .group_bd_addr = false, 32 + .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt), 32 33 33 34 .init_cfg_reg = R_AX_PCIE_INIT_CFG1, 34 35 .txhci_en_bit = B_AX_TXHCI_EN, ··· 60 57 61 58 .ltr_set = rtw89_pci_ltr_set, 62 59 .fill_txaddr_info = rtw89_pci_fill_txaddr_info, 60 + .parse_rpp = rtw89_pci_parse_rpp, 63 61 .config_intr_mask = rtw89_pci_config_intr_mask, 64 62 .enable_intr = rtw89_pci_enable_intr, 65 63 .disable_intr = rtw89_pci_disable_intr,
+1
drivers/net/wireless/realtek/rtw89/rtw8852bt.c
··· 708 708 .query_rxdesc = rtw89_core_query_rxdesc, 709 709 .fill_txdesc = rtw89_core_fill_txdesc, 710 710 .fill_txdesc_fwcmd = rtw89_core_fill_txdesc, 711 + .get_ch_dma = rtw89_core_get_ch_dma, 711 712 .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path, 712 713 .mac_cfg_gnt = rtw89_mac_cfg_gnt, 713 714 .stop_sch_tx = rtw89_mac_stop_sch_tx,
+3 -11
drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
··· 1799 1799 { 1800 1800 struct rtw89_dpk_info *dpk = &rtwdev->dpk; 1801 1801 u8 val, kidx = dpk->cur_idx[path]; 1802 - bool off_reverse; 1803 1802 1804 1803 val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok; 1805 - 1806 - if (off) 1807 - off_reverse = false; 1808 - else 1809 - off_reverse = true; 1810 - 1811 - val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok; 1812 1804 1813 1805 rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), 1814 1806 BIT(24), val); 1815 1807 1816 1808 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, 1817 - kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse)); 1809 + kidx, str_enable_disable(dpk->is_dpk_enable && !off)); 1818 1810 } 1819 1811 1820 1812 static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, ··· 1875 1883 rtw89_debug(rtwdev, RTW89_DBG_RFK, 1876 1884 "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", 1877 1885 path, dpk->cur_idx[path], phy, 1878 - rtwdev->is_tssi_mode[path] ? "on" : "off", 1879 - rtwdev->dbcc_en ? "on" : "off", 1886 + str_on_off(rtwdev->is_tssi_mode[path]), 1887 + str_on_off(rtwdev->dbcc_en), 1880 1888 dpk->bp[path][kidx].band == 0 ? "2G" : 1881 1889 dpk->bp[path][kidx].band == 1 ? "5G" : "6G", 1882 1890 dpk->bp[path][kidx].ch,
+4
drivers/net/wireless/realtek/rtw89/rtw8852bte.c
··· 17 17 18 18 static const struct rtw89_pci_info rtw8852bt_pci_info = { 19 19 .gen_def = &rtw89_pci_gen_ax, 20 + .isr_def = &rtw89_pci_isr_ax, 20 21 .txbd_trunc_mode = MAC_AX_BD_TRUNC, 21 22 .rxbd_trunc_mode = MAC_AX_BD_TRUNC, 22 23 .rxbd_mode = MAC_AX_RXBD_PKT, ··· 35 34 .rx_ring_eq_is_full = false, 36 35 .check_rx_tag = false, 37 36 .no_rxbd_fs = false, 37 + .group_bd_addr = false, 38 + .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt), 38 39 39 40 .init_cfg_reg = R_AX_PCIE_INIT_CFG1, 40 41 .txhci_en_bit = B_AX_TXHCI_EN, ··· 66 63 67 64 .ltr_set = rtw89_pci_ltr_set, 68 65 .fill_txaddr_info = rtw89_pci_fill_txaddr_info, 66 + .parse_rpp = rtw89_pci_parse_rpp, 69 67 .config_intr_mask = rtw89_pci_config_intr_mask, 70 68 .enable_intr = rtw89_pci_enable_intr, 71 69 .disable_intr = rtw89_pci_disable_intr,
+2
drivers/net/wireless/realtek/rtw89/rtw8852bu.c
··· 30 30 .driver_info = (kernel_ulong_t)&rtw89_8852bu_info }, 31 31 { USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0x6931, 0xff, 0xff, 0xff), 32 32 .driver_info = (kernel_ulong_t)&rtw89_8852bu_info }, 33 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3327, 0xff, 0xff, 0xff), 34 + .driver_info = (kernel_ulong_t)&rtw89_8852bu_info }, 33 35 { USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6121, 0xff, 0xff, 0xff), 34 36 .driver_info = (kernel_ulong_t)&rtw89_8852bu_info }, 35 37 { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0100, 0xff, 0xff, 0xff),
+4
drivers/net/wireless/realtek/rtw89/rtw8852c.c
··· 2962 2962 .query_rxdesc = rtw89_core_query_rxdesc, 2963 2963 .fill_txdesc = rtw89_core_fill_txdesc_v1, 2964 2964 .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1, 2965 + .get_ch_dma = rtw89_core_get_ch_dma, 2965 2966 .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1, 2966 2967 .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1, 2967 2968 .stop_sch_tx = rtw89_mac_stop_sch_tx_v1, ··· 3044 3043 .support_ant_gain = true, 3045 3044 .support_tas = true, 3046 3045 .support_sar_by_ant = true, 3046 + .support_noise = false, 3047 3047 .ul_tb_waveform_ctrl = false, 3048 3048 .ul_tb_pwr_diff = true, 3049 3049 .rx_freq_frome_ie = false, ··· 3108 3106 .cfo_hw_comp = false, 3109 3107 .dcfo_comp = &rtw8852c_dcfo_comp, 3110 3108 .dcfo_comp_sft = 12, 3109 + .nhm_report = NULL, 3110 + .nhm_th = NULL, 3111 3111 .imr_info = &rtw8852c_imr_info, 3112 3112 .imr_dmac_table = NULL, 3113 3113 .imr_cmac_table = NULL,
+4
drivers/net/wireless/realtek/rtw89/rtw8852ce.c
··· 20 20 21 21 static const struct rtw89_pci_info rtw8852c_pci_info = { 22 22 .gen_def = &rtw89_pci_gen_ax, 23 + .isr_def = &rtw89_pci_isr_ax, 23 24 .txbd_trunc_mode = MAC_AX_BD_TRUNC, 24 25 .rxbd_trunc_mode = MAC_AX_BD_TRUNC, 25 26 .rxbd_mode = MAC_AX_RXBD_PKT, ··· 38 37 .rx_ring_eq_is_full = false, 39 38 .check_rx_tag = false, 40 39 .no_rxbd_fs = false, 40 + .group_bd_addr = false, 41 + .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt), 41 42 42 43 .init_cfg_reg = R_AX_HAXI_INIT_CFG1, 43 44 .txhci_en_bit = B_AX_TXHCI_EN_V1, ··· 67 64 68 65 .ltr_set = rtw89_pci_ltr_set_v1, 69 66 .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1, 67 + .parse_rpp = rtw89_pci_parse_rpp, 70 68 .config_intr_mask = rtw89_pci_config_intr_mask_v1, 71 69 .enable_intr = rtw89_pci_enable_intr_v1, 72 70 .disable_intr = rtw89_pci_disable_intr_v1,
+10 -1
drivers/net/wireless/realtek/rtw89/rtw8922a.c
··· 2765 2765 return 0; 2766 2766 } 2767 2767 2768 + static const struct rtw89_chanctx_listener rtw8922a_chanctx_listener = { 2769 + .callbacks[RTW89_CHANCTX_CALLBACK_TAS] = rtw89_tas_chanctx_cb, 2770 + }; 2771 + 2768 2772 #ifdef CONFIG_PM 2769 2773 static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = { 2770 2774 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | ··· 2821 2817 .query_rxdesc = rtw89_core_query_rxdesc_v2, 2822 2818 .fill_txdesc = rtw89_core_fill_txdesc_v2, 2823 2819 .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2, 2820 + .get_ch_dma = rtw89_core_get_ch_dma, 2824 2821 .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v2, 2825 2822 .mac_cfg_gnt = rtw89_mac_cfg_gnt_v2, 2826 2823 .stop_sch_tx = rtw89_mac_stop_sch_tx_v2, ··· 2880 2875 .nctl_post_table = NULL, 2881 2876 .dflt_parms = NULL, /* load parm from fw */ 2882 2877 .rfe_parms_conf = NULL, /* load parm from fw */ 2878 + .chanctx_listener = &rtw8922a_chanctx_listener, 2883 2879 .txpwr_factor_bb = 3, 2884 2880 .txpwr_factor_rf = 2, 2885 2881 .txpwr_factor_mac = 1, ··· 2900 2894 BIT(NL80211_CHAN_WIDTH_160), 2901 2895 .support_unii4 = true, 2902 2896 .support_ant_gain = true, 2903 - .support_tas = false, 2897 + .support_tas = true, 2904 2898 .support_sar_by_ant = true, 2899 + .support_noise = false, 2905 2900 .ul_tb_waveform_ctrl = false, 2906 2901 .ul_tb_pwr_diff = false, 2907 2902 .rx_freq_frome_ie = false, ··· 2965 2958 .cfo_hw_comp = true, 2966 2959 .dcfo_comp = NULL, 2967 2960 .dcfo_comp_sft = 0, 2961 + .nhm_report = NULL, 2962 + .nhm_th = NULL, 2968 2963 .imr_info = NULL, 2969 2964 .imr_dmac_table = &rtw8922a_imr_dmac_table, 2970 2965 .imr_cmac_table = &rtw8922a_imr_cmac_table,
+4
drivers/net/wireless/realtek/rtw89/rtw8922ae.c
··· 17 17 18 18 static const struct rtw89_pci_info rtw8922a_pci_info = { 19 19 .gen_def = &rtw89_pci_gen_be, 20 + .isr_def = &rtw89_pci_isr_be, 20 21 .txbd_trunc_mode = MAC_AX_BD_TRUNC, 21 22 .rxbd_trunc_mode = MAC_AX_BD_TRUNC, 22 23 .rxbd_mode = MAC_AX_RXBD_PKT, ··· 35 34 .rx_ring_eq_is_full = true, 36 35 .check_rx_tag = true, 37 36 .no_rxbd_fs = true, 37 + .group_bd_addr = false, 38 + .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt), 38 39 39 40 .init_cfg_reg = R_BE_HAXI_INIT_CFG1, 40 41 .txhci_en_bit = B_BE_TXDMA_EN, ··· 64 61 65 62 .ltr_set = rtw89_pci_ltr_set_v2, 66 63 .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1, 64 + .parse_rpp = rtw89_pci_parse_rpp, 67 65 .config_intr_mask = rtw89_pci_config_intr_mask_v2, 68 66 .enable_intr = rtw89_pci_enable_intr_v2, 69 67 .disable_intr = rtw89_pci_disable_intr_v2,
+15
drivers/net/wireless/realtek/rtw89/sar.c
··· 4 4 5 5 #include "acpi.h" 6 6 #include "debug.h" 7 + #include "fw.h" 7 8 #include "phy.h" 8 9 #include "reg.h" 9 10 #include "sar.h" ··· 843 842 } 844 843 } 845 844 EXPORT_SYMBOL(rtw89_tas_chanctx_cb); 845 + 846 + void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable) 847 + { 848 + const struct rtw89_chip_info *chip = rtwdev->chip; 849 + struct rtw89_tas_info *tas = &rtwdev->tas; 850 + 851 + if (!tas->enable) 852 + return; 853 + 854 + if (chip->chip_gen == RTW89_CHIP_AX) 855 + return; 856 + 857 + rtw89_fw_h2c_rf_tas_trigger(rtwdev, enable); 858 + } 846 859 847 860 void rtw89_sar_init(struct rtw89_dev *rtwdev) 848 861 {
+1
drivers/net/wireless/realtek/rtw89/sar.h
··· 37 37 void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start); 38 38 void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev, 39 39 enum rtw89_chanctx_state state); 40 + void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable); 40 41 void rtw89_sar_init(struct rtw89_dev *rtwdev); 41 42 void rtw89_sar_track(struct rtw89_dev *rtwdev); 42 43
+3 -2
drivers/net/wireless/realtek/rtw89/ser.c
··· 205 205 206 206 static int ser_send_msg(struct rtw89_ser *ser, u8 event) 207 207 { 208 - struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); 209 208 struct ser_msg *msg = NULL; 210 209 211 210 if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags)) ··· 220 221 list_add(&msg->list, &ser->msg_q); 221 222 spin_unlock_irq(&ser->msg_q_lock); 222 223 223 - ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work); 224 + schedule_work(&ser->ser_hdl_work); 224 225 return 0; 225 226 } 226 227 ··· 501 502 } 502 503 503 504 drv_stop_rx(ser); 505 + wiphy_lock(wiphy); 504 506 drv_trx_reset(ser); 507 + wiphy_unlock(wiphy); 505 508 506 509 /* wait m3 */ 507 510 hal_send_m2_event(ser);
+1 -37
drivers/net/wireless/realtek/rtw89/txrx.h
··· 572 572 } __packed; 573 573 574 574 #define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7) 575 + #define RTW89_PHY_STS_IE00_W3_RX_PATH_EN GENMASK(31, 28) 575 576 576 577 struct rtw89_phy_sts_ie00_v2 { 577 578 __le32 w0; ··· 731 730 return RTW89_TX_QSEL_B1_MGMT; 732 731 else 733 732 return RTW89_TX_QSEL_B0_MGMT; 734 - } 735 - 736 - static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel) 737 - { 738 - switch (qsel) { 739 - default: 740 - rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel); 741 - fallthrough; 742 - case RTW89_TX_QSEL_BE_0: 743 - case RTW89_TX_QSEL_BE_1: 744 - case RTW89_TX_QSEL_BE_2: 745 - case RTW89_TX_QSEL_BE_3: 746 - return RTW89_TXCH_ACH0; 747 - case RTW89_TX_QSEL_BK_0: 748 - case RTW89_TX_QSEL_BK_1: 749 - case RTW89_TX_QSEL_BK_2: 750 - case RTW89_TX_QSEL_BK_3: 751 - return RTW89_TXCH_ACH1; 752 - case RTW89_TX_QSEL_VI_0: 753 - case RTW89_TX_QSEL_VI_1: 754 - case RTW89_TX_QSEL_VI_2: 755 - case RTW89_TX_QSEL_VI_3: 756 - return RTW89_TXCH_ACH2; 757 - case RTW89_TX_QSEL_VO_0: 758 - case RTW89_TX_QSEL_VO_1: 759 - case RTW89_TX_QSEL_VO_2: 760 - case RTW89_TX_QSEL_VO_3: 761 - return RTW89_TXCH_ACH3; 762 - case RTW89_TX_QSEL_B0_MGMT: 763 - return RTW89_TXCH_CH8; 764 - case RTW89_TX_QSEL_B0_HI: 765 - return RTW89_TXCH_CH9; 766 - case RTW89_TX_QSEL_B1_MGMT: 767 - return RTW89_TXCH_CH10; 768 - case RTW89_TX_QSEL_B1_HI: 769 - return RTW89_TXCH_CH11; 770 - } 771 733 } 772 734 773 735 static inline u8 rtw89_core_get_tid_indicate(struct rtw89_dev *rtwdev, u8 tid)
+64 -15
drivers/net/wireless/realtek/rtw89/wow.c
··· 99 99 100 100 ieee80211_get_key_rx_seq(key, 0, &seq); 101 101 102 - /* seq.ccmp.pn[] is BE order array */ 103 - pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) | 104 - u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) | 105 - u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) | 106 - u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) | 107 - u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) | 108 - u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0); 102 + switch (key->cipher) { 103 + case WLAN_CIPHER_SUITE_TKIP: 104 + pn = u64_encode_bits(seq.tkip.iv32, RTW89_KEY_TKIP_PN_IV32) | 105 + u64_encode_bits(seq.tkip.iv16, RTW89_KEY_TKIP_PN_IV16); 106 + break; 107 + case WLAN_CIPHER_SUITE_CCMP: 108 + case WLAN_CIPHER_SUITE_GCMP: 109 + case WLAN_CIPHER_SUITE_CCMP_256: 110 + case WLAN_CIPHER_SUITE_GCMP_256: 111 + /* seq.ccmp.pn[] is BE order array */ 112 + pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) | 113 + u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) | 114 + u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) | 115 + u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) | 116 + u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) | 117 + u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0); 118 + break; 119 + default: 120 + return -EINVAL; 121 + } 109 122 110 123 err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx); 111 124 if (err) ··· 190 177 if (err) 191 178 return err; 192 179 193 - /* seq.ccmp.pn[] is BE order array */ 194 - seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5); 195 - seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4); 196 - seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3); 197 - seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2); 198 - seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1); 199 - seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0); 180 + switch (key->cipher) { 181 + case WLAN_CIPHER_SUITE_TKIP: 182 + seq.tkip.iv32 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV32); 183 + seq.tkip.iv16 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV16); 184 + break; 185 + case 
WLAN_CIPHER_SUITE_CCMP: 186 + case WLAN_CIPHER_SUITE_GCMP: 187 + case WLAN_CIPHER_SUITE_CCMP_256: 188 + case WLAN_CIPHER_SUITE_GCMP_256: 189 + /* seq.ccmp.pn[] is BE order array */ 190 + seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5); 191 + seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4); 192 + seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3); 193 + seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2); 194 + seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1); 195 + seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0); 196 + break; 197 + default: 198 + return -EINVAL; 199 + } 200 200 201 201 ieee80211_set_key_rx_seq(key, 0, &seq); 202 202 rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%*ph\n", ··· 311 285 312 286 switch (key->cipher) { 313 287 case WLAN_CIPHER_SUITE_TKIP: 288 + if (sta) 289 + memcpy(gtk_info->txmickey, 290 + key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 291 + sizeof(gtk_info->txmickey)); 292 + fallthrough; 314 293 case WLAN_CIPHER_SUITE_CCMP: 315 294 case WLAN_CIPHER_SUITE_GCMP: 316 295 case WLAN_CIPHER_SUITE_CCMP_256: ··· 379 348 struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; 380 349 struct rtw89_set_key_info_iter_data *iter_data = data; 381 350 bool update_tx_key_info = iter_data->rx_ready; 351 + u8 tmp[RTW89_MIC_KEY_LEN]; 382 352 int ret; 383 353 384 354 switch (key->cipher) { 385 355 case WLAN_CIPHER_SUITE_TKIP: 356 + /* 357 + * TX MIC KEY and RX MIC KEY is oppsite in FW, 358 + * need to swap it before sending to mac80211. 
359 + */ 360 + if (!sta && update_tx_key_info && aoac_rpt->rekey_ok && 361 + !iter_data->tkip_gtk_swapped) { 362 + memcpy(tmp, &aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 363 + RTW89_MIC_KEY_LEN); 364 + memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 365 + &aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 366 + RTW89_MIC_KEY_LEN); 367 + memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 368 + tmp, RTW89_MIC_KEY_LEN); 369 + iter_data->tkip_gtk_swapped = true; 370 + } 371 + fallthrough; 386 372 case WLAN_CIPHER_SUITE_CCMP: 387 373 case WLAN_CIPHER_SUITE_GCMP: 388 374 case WLAN_CIPHER_SUITE_CCMP_256: ··· 690 642 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 691 643 struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; 692 644 struct rtw89_set_key_info_iter_data data = {.error = false, 693 - .rx_ready = rx_ready}; 645 + .rx_ready = rx_ready, 646 + .tkip_gtk_swapped = false}; 694 647 struct ieee80211_bss_conf *bss_conf; 695 648 struct ieee80211_key_conf *key; 696 649
+6
drivers/net/wireless/realtek/rtw89/wow.h
··· 5 5 #ifndef __RTW89_WOW_H__ 6 6 #define __RTW89_WOW_H__ 7 7 8 + #define RTW89_KEY_TKIP_PN_IV16 GENMASK_ULL(15, 0) 9 + #define RTW89_KEY_TKIP_PN_IV32 GENMASK_ULL(47, 16) 10 + 8 11 #define RTW89_KEY_PN_0 GENMASK_ULL(7, 0) 9 12 #define RTW89_KEY_PN_1 GENMASK_ULL(15, 8) 10 13 #define RTW89_KEY_PN_2 GENMASK_ULL(23, 16) ··· 27 24 #define RTW89_WOW_VALID_CHECK 0xDD 28 25 #define RTW89_WOW_SYMBOL_CHK_PTK BIT(0) 29 26 #define RTW89_WOW_SYMBOL_CHK_GTK BIT(1) 27 + 28 + #define RTW89_MIC_KEY_LEN 8 30 29 31 30 enum rtw89_wake_reason { 32 31 RTW89_WOW_RSN_RX_PTK_REKEY = 0x1, ··· 78 73 u32 igtk_cipher; 79 74 bool rx_ready; 80 75 bool error; 76 + bool tkip_gtk_swapped; 81 77 }; 82 78 83 79 static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)