Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iwlagn: move tx queues to transport layer

This finalizes the move of the data path to the transport layer.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>

Authored by Emmanuel Grumbach; committed by John W. Linville.
8ad71bef e20d4341

+136 -139
+2 -21
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
··· 742 742 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 743 743 int txq_id = SEQ_TO_QUEUE(sequence); 744 744 int cmd_index = SEQ_TO_INDEX(sequence); 745 - struct iwl_tx_queue *txq = &priv->txq[txq_id]; 746 745 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 747 746 struct ieee80211_hdr *hdr; 748 747 u32 status = le16_to_cpu(tx_resp->status.status); ··· 754 755 struct sk_buff_head skbs; 755 756 struct sk_buff *skb; 756 757 struct iwl_rxon_context *ctx; 757 - 758 - if ((cmd_index >= txq->q.n_bd) || 759 - (iwl_queue_used(&txq->q, cmd_index) == 0)) { 760 - IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) " 761 - "cmd_index %d is out of range [0-%d] %d %d\n", 762 - __func__, txq_id, cmd_index, txq->q.n_bd, 763 - txq->q.write_ptr, txq->q.read_ptr); 764 - return; 765 - } 766 - 767 - txq->time_stamp = jiffies; 758 + bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 768 759 769 760 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> 770 761 IWLAGN_TX_RES_TID_POS; ··· 763 774 764 775 spin_lock_irqsave(&priv->shrd->sta_lock, flags); 765 776 766 - if (txq->sched_retry) 777 + if (is_agg) 767 778 iwl_rx_reply_tx_agg(priv, tx_resp); 768 779 769 780 if (tx_resp->frame_count == 1) { 770 - bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 771 - 772 781 __skb_queue_head_init(&skbs); 773 782 /*we can free until ssn % q.n_bd not inclusive */ 774 783 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, ··· 837 850 { 838 851 struct iwl_rx_packet *pkt = rxb_addr(rxb); 839 852 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 840 - struct iwl_tx_queue *txq = NULL; 841 853 struct iwl_ht_agg *agg; 842 854 struct sk_buff_head reclaimed_skbs; 843 855 struct ieee80211_tx_info *info; 844 856 struct ieee80211_hdr *hdr; 845 857 struct sk_buff *skb; 846 858 unsigned long flags; 847 - int index; 848 859 int sta_id; 849 860 int tid; 850 861 int freed; ··· 860 875 return; 861 876 } 862 877 863 - txq = &priv->txq[scd_flow]; 864 878 sta_id = ba_resp->sta_id; 865 879 
tid = ba_resp->tid; 866 880 agg = &priv->shrd->tid_data[sta_id][tid].agg; 867 - 868 - /* Find index of block-ack window */ 869 - index = ba_resp_scd_ssn & (txq->q.n_bd - 1); 870 881 871 882 spin_lock_irqsave(&priv->shrd->sta_lock, flags); 872 883
-42
drivers/net/wireless/iwlwifi/iwl-dev.h
··· 574 574 ****************************************************************************/ 575 575 extern void iwl_update_chain_flags(struct iwl_priv *priv); 576 576 extern const u8 iwl_bcast_addr[ETH_ALEN]; 577 - extern int iwl_queue_space(const struct iwl_queue *q); 578 - static inline int iwl_queue_used(const struct iwl_queue *q, int i) 579 - { 580 - return q->write_ptr >= q->read_ptr ? 581 - (i >= q->read_ptr && i < q->write_ptr) : 582 - !(i < q->read_ptr && i >= q->write_ptr); 583 - } 584 - 585 - 586 - static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) 587 - { 588 - return index & (q->n_window - 1); 589 - } 590 577 591 578 #define IWL_OPERATION_MODE_AUTO 0 592 579 #define IWL_OPERATION_MODE_HT_ONLY 1 ··· 1143 1156 1144 1157 int activity_timer_active; 1145 1158 1146 - /* Tx DMA processing queues */ 1147 - struct iwl_tx_queue *txq; 1148 - unsigned long txq_ctx_active_msk; 1149 - 1150 1159 /* counts mgmt, ctl, and data packets */ 1151 1160 struct traffic_stats tx_stats; 1152 1161 struct traffic_stats rx_stats; ··· 1154 1171 int num_stations; 1155 1172 struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; 1156 1173 unsigned long ucode_key_table; 1157 - 1158 - /* queue refcounts */ 1159 - #define IWL_MAX_HW_QUEUES 32 1160 - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; 1161 - /* for each AC */ 1162 - atomic_t queue_stop_count[4]; 1163 1174 1164 1175 /* Indication if ieee80211_ops->open has been called */ 1165 1176 u8 is_open; ··· 1311 1334 bool have_rekey_data; 1312 1335 }; /*iwl_priv */ 1313 1336 1314 - static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1315 - { 1316 - set_bit(txq_id, &priv->txq_ctx_active_msk); 1317 - } 1318 - 1319 - static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) 1320 - { 1321 - clear_bit(txq_id, &priv->txq_ctx_active_msk); 1322 - } 1323 - 1324 1337 extern struct iwl_mod_params iwlagn_mod_params; 1325 - 1326 - static inline struct ieee80211_hdr 
*iwl_tx_queue_get_hdr(struct iwl_priv *priv, 1327 - int txq_id, int idx) 1328 - { 1329 - if (priv->txq[txq_id].skbs[idx]) 1330 - return (struct ieee80211_hdr *)priv->txq[txq_id]. 1331 - skbs[idx]->data; 1332 - return NULL; 1333 - } 1334 1338 1335 1339 static inline struct iwl_rxon_context * 1336 1340 iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+46 -4
drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
··· 125 125 * @ac_to_fifo: to what fifo is a specifc AC mapped ? 126 126 * @ac_to_queue: to what tx queue is a specifc AC mapped ? 127 127 * @mcast_queue: 128 + * @txq: Tx DMA processing queues 129 + * @txq_ctx_active_msk: what queue is active 130 + * queue_stopped: tracks what queue is stopped 131 + * queue_stop_count: tracks what SW queue is stopped 128 132 */ 129 133 struct iwl_trans_pcie { 130 134 struct iwl_rx_queue rxq; ··· 154 150 const u8 *ac_to_fifo[NUM_IWL_RXON_CTX]; 155 151 const u8 *ac_to_queue[NUM_IWL_RXON_CTX]; 156 152 u8 mcast_queue[NUM_IWL_RXON_CTX]; 153 + 154 + struct iwl_tx_queue *txq; 155 + unsigned long txq_ctx_active_msk; 156 + #define IWL_MAX_HW_QUEUES 32 157 + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; 158 + atomic_t queue_stop_count[4]; 157 159 }; 158 160 159 161 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ ··· 217 207 int index); 218 208 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 219 209 struct sk_buff_head *skbs); 210 + int iwl_queue_space(const struct iwl_queue *q); 220 211 221 212 /***************************************************** 222 213 * Error handling ··· 227 216 int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); 228 217 void iwl_dump_csr(struct iwl_trans *trans); 229 218 219 + /***************************************************** 220 + * Helpers 221 + ******************************************************/ 230 222 static inline void iwl_disable_interrupts(struct iwl_trans *trans) 231 223 { 232 224 clear_bit(STATUS_INT_ENABLED, &trans->shrd->status); ··· 279 265 u8 queue = txq->swq_id; 280 266 u8 ac = queue & 3; 281 267 u8 hwq = (queue >> 2) & 0x1f; 268 + struct iwl_trans_pcie *trans_pcie = 269 + IWL_TRANS_GET_PCIE_TRANS(trans); 282 270 283 271 if (unlikely(!trans->shrd->mac80211_registered)) 284 272 return; 285 273 286 - if (test_and_clear_bit(hwq, priv(trans)->queue_stopped)) 287 - if (atomic_dec_return(&priv(trans)->queue_stop_count[ac]) <= 0) 274 + if 
(test_and_clear_bit(hwq, trans_pcie->queue_stopped)) 275 + if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) 288 276 ieee80211_wake_queue(trans->shrd->hw, ac); 289 277 } 290 278 ··· 296 280 u8 queue = txq->swq_id; 297 281 u8 ac = queue & 3; 298 282 u8 hwq = (queue >> 2) & 0x1f; 283 + struct iwl_trans_pcie *trans_pcie = 284 + IWL_TRANS_GET_PCIE_TRANS(trans); 299 285 300 286 if (unlikely(!trans->shrd->mac80211_registered)) 301 287 return; 302 288 303 - if (!test_and_set_bit(hwq, priv(trans)->queue_stopped)) 304 - if (atomic_inc_return(&priv(trans)->queue_stop_count[ac]) > 0) 289 + if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) 290 + if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) 305 291 ieee80211_stop_queue(trans->shrd->hw, ac); 306 292 } 307 293 ··· 318 300 #endif 319 301 320 302 #define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue 303 + 304 + static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie, 305 + int txq_id) 306 + { 307 + set_bit(txq_id, &trans_pcie->txq_ctx_active_msk); 308 + } 309 + 310 + static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie, 311 + int txq_id) 312 + { 313 + clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk); 314 + } 315 + 316 + static inline int iwl_queue_used(const struct iwl_queue *q, int i) 317 + { 318 + return q->write_ptr >= q->read_ptr ? 319 + (i >= q->read_ptr && i < q->write_ptr) : 320 + !(i < q->read_ptr && i >= q->write_ptr); 321 + } 322 + 323 + static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) 324 + { 325 + return index & (q->n_window - 1); 326 + } 321 327 322 328 #endif /* __iwl_trans_int_pcie_h__ */
+1 -1
drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
··· 1032 1032 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); 1033 1033 for (i = 0; i < hw_params(trans).max_txq_num; i++) 1034 1034 iwl_txq_update_write_ptr(trans, 1035 - &priv(trans)->txq[i]); 1035 + &trans_pcie->txq[i]); 1036 1036 1037 1037 isr_stats->wakeup++; 1038 1038
+32 -21
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
··· 407 407 struct iwl_tx_queue *txq, 408 408 int tx_fifo_id, int scd_retry) 409 409 { 410 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 410 411 int txq_id = txq->q.id; 411 412 int active = 412 - test_bit(txq_id, &priv(trans)->txq_ctx_active_msk) ? 1 : 0; 413 + test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0; 413 414 414 415 iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id), 415 416 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | ··· 483 482 484 483 /* Place first TFD at index corresponding to start sequence number. 485 484 * Assumes that ssn_idx is valid (!= 0xFFF) */ 486 - priv(trans)->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 487 - priv(trans)->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 485 + trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 486 + trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 488 487 iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx); 489 488 490 489 /* Set up Tx window size and frame limit for this queue */ ··· 501 500 iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id)); 502 501 503 502 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 504 - iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id], 503 + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 505 504 tx_fifo, 1); 506 505 507 - priv(trans)->txq[txq_id].sta_id = sta_id; 508 - priv(trans)->txq[txq_id].tid = tid; 506 + trans_pcie->txq[txq_id].sta_id = sta_id; 507 + trans_pcie->txq[txq_id].tid = tid; 509 508 510 509 spin_unlock_irqrestore(&trans->shrd->lock, flags); 511 510 } ··· 518 517 */ 519 518 static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans) 520 519 { 520 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 521 521 int txq_id; 522 522 523 523 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) 524 524 if (!test_and_set_bit(txq_id, 525 - &priv(trans)->txq_ctx_active_msk)) 525 + &trans_pcie->txq_ctx_active_msk)) 526 526 return txq_id; 
527 527 return -1; 528 528 } ··· 532 530 enum iwl_rxon_context_id ctx, int sta_id, 533 531 int tid, u16 *ssn) 534 532 { 533 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 535 534 struct iwl_tid_data *tid_data; 536 535 unsigned long flags; 537 536 u16 txq_id; ··· 548 545 tid_data = &trans->shrd->tid_data[sta_id][tid]; 549 546 *ssn = SEQ_TO_SN(tid_data->seq_number); 550 547 tid_data->agg.txq_id = txq_id; 551 - iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id); 548 + iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id); 552 549 553 550 tid_data = &trans->shrd->tid_data[sta_id][tid]; 554 551 if (tid_data->tfds_in_queue == 0) { ··· 567 564 568 565 void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id) 569 566 { 567 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 570 568 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 571 569 572 570 iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id)); 573 571 574 - priv(trans)->txq[txq_id].q.read_ptr = 0; 575 - priv(trans)->txq[txq_id].q.write_ptr = 0; 572 + trans_pcie->txq[txq_id].q.read_ptr = 0; 573 + trans_pcie->txq[txq_id].q.write_ptr = 0; 576 574 /* supposes that ssn_idx is valid (!= 0xFFF) */ 577 575 iwl_trans_set_wr_ptrs(trans, txq_id, 0); 578 576 579 577 iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id)); 580 - iwl_txq_ctx_deactivate(priv(trans), txq_id); 581 - iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id], 0, 0); 578 + iwl_txq_ctx_deactivate(trans_pcie, txq_id); 579 + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); 582 580 } 583 581 584 582 int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, 585 583 enum iwl_rxon_context_id ctx, int sta_id, 586 584 int tid) 587 585 { 586 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 588 587 unsigned long flags; 589 588 int read_ptr, write_ptr; 590 589 struct iwl_tid_data *tid_data; ··· 626 621 "or 
starting\n"); 627 622 } 628 623 629 - write_ptr = priv(trans)->txq[txq_id].q.write_ptr; 630 - read_ptr = priv(trans)->txq[txq_id].q.read_ptr; 624 + write_ptr = trans_pcie->txq[txq_id].q.write_ptr; 625 + read_ptr = trans_pcie->txq[txq_id].q.read_ptr; 631 626 632 627 /* The queue is not empty */ 633 628 if (write_ptr != read_ptr) { ··· 668 663 */ 669 664 static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 670 665 { 671 - struct iwl_tx_queue *txq = &priv(trans)->txq[trans->shrd->cmd_queue]; 666 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 667 + struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue]; 672 668 struct iwl_queue *q = &txq->q; 673 669 struct iwl_device_cmd *out_cmd; 674 670 struct iwl_cmd_meta *out_meta; ··· 858 852 */ 859 853 static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx) 860 854 { 861 - struct iwl_tx_queue *txq = &priv->txq[txq_id]; 855 + struct iwl_trans_pcie *trans_pcie = 856 + IWL_TRANS_GET_PCIE_TRANS(trans(priv)); 857 + struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 862 858 struct iwl_queue *q = &txq->q; 863 859 int nfreed = 0; 864 860 ··· 901 893 struct iwl_device_cmd *cmd; 902 894 struct iwl_cmd_meta *meta; 903 895 struct iwl_trans *trans = trans(priv); 904 - struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue]; 896 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 897 + struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue]; 905 898 unsigned long flags; 906 899 907 900 /* If a Tx command is being handled and it isn't in the actual ··· 911 902 if (WARN(txq_id != trans->shrd->cmd_queue, 912 903 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 913 904 txq_id, trans->shrd->cmd_queue, sequence, 914 - priv->txq[trans->shrd->cmd_queue].q.read_ptr, 915 - priv->txq[trans->shrd->cmd_queue].q.write_ptr)) { 905 + trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr, 906 + 
trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) { 916 907 iwl_print_hex_error(priv, pkt, 32); 917 908 return; 918 909 } ··· 1081 1072 1082 1073 static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1083 1074 { 1075 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1084 1076 int cmd_idx; 1085 1077 int ret; 1086 1078 ··· 1154 1144 * in later, it will possibly set an invalid 1155 1145 * address (cmd->meta.source). 1156 1146 */ 1157 - priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &= 1147 + trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &= 1158 1148 ~CMD_WANT_SKB; 1159 1149 } 1160 1150 fail: ··· 1191 1181 int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 1192 1182 struct sk_buff_head *skbs) 1193 1183 { 1194 - struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id]; 1184 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1185 + struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 1195 1186 struct iwl_queue *q = &txq->q; 1196 1187 int last_to_free; 1197 1188 int freed = 0;
+55 -50
drivers/net/wireless/iwlwifi/iwl-trans.c
··· 409 409 */ 410 410 static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) 411 411 { 412 - struct iwl_priv *priv = priv(trans); 413 - struct iwl_tx_queue *txq = &priv->txq[txq_id]; 412 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 413 + struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 414 414 struct iwl_queue *q = &txq->q; 415 415 416 416 if (!q->n_bd) ··· 433 433 */ 434 434 static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) 435 435 { 436 - struct iwl_priv *priv = priv(trans); 437 - struct iwl_tx_queue *txq = &priv->txq[txq_id]; 436 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 437 + struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 438 438 struct device *dev = bus(trans)->dev; 439 439 int i; 440 440 if (WARN_ON(!txq)) ··· 477 477 static void iwl_trans_pcie_tx_free(struct iwl_trans *trans) 478 478 { 479 479 int txq_id; 480 - struct iwl_trans_pcie *trans_pcie = 481 - IWL_TRANS_GET_PCIE_TRANS(trans); 482 - struct iwl_priv *priv = priv(trans); 480 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 483 481 484 482 /* Tx queues */ 485 - if (priv->txq) { 483 + if (trans_pcie->txq) { 486 484 for (txq_id = 0; 487 485 txq_id < hw_params(trans).max_txq_num; txq_id++) 488 486 iwl_tx_queue_free(trans, txq_id); 489 487 } 490 488 491 - kfree(priv->txq); 492 - priv->txq = NULL; 489 + kfree(trans_pcie->txq); 490 + trans_pcie->txq = NULL; 493 491 494 492 iwlagn_free_dma_ptr(trans, &trans_pcie->kw); 495 493 ··· 505 507 { 506 508 int ret; 507 509 int txq_id, slots_num; 508 - struct iwl_priv *priv = priv(trans); 509 - struct iwl_trans_pcie *trans_pcie = 510 - IWL_TRANS_GET_PCIE_TRANS(trans); 510 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 511 511 512 512 u16 scd_bc_tbls_size = hw_params(trans).max_txq_num * 513 513 sizeof(struct iwlagn_scd_bc_tbl); 514 514 515 515 /*It is not allowed to alloc twice, so warn when this happens. 
516 516 * We cannot rely on the previous allocation, so free and fail */ 517 - if (WARN_ON(priv->txq)) { 517 + if (WARN_ON(trans_pcie->txq)) { 518 518 ret = -EINVAL; 519 519 goto error; 520 520 } ··· 531 535 goto error; 532 536 } 533 537 534 - priv->txq = kzalloc(sizeof(struct iwl_tx_queue) * 538 + trans_pcie->txq = kzalloc(sizeof(struct iwl_tx_queue) * 535 539 hw_params(trans).max_txq_num, GFP_KERNEL); 536 - if (!priv->txq) { 540 + if (!trans_pcie->txq) { 537 541 IWL_ERR(trans, "Not enough memory for txq\n"); 538 542 ret = ENOMEM; 539 543 goto error; ··· 543 547 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) { 544 548 slots_num = (txq_id == trans->shrd->cmd_queue) ? 545 549 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 546 - ret = iwl_trans_txq_alloc(trans, &priv->txq[txq_id], slots_num, 547 - txq_id); 550 + ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id], 551 + slots_num, txq_id); 548 552 if (ret) { 549 553 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 550 554 goto error; ··· 564 568 int txq_id, slots_num; 565 569 unsigned long flags; 566 570 bool alloc = false; 567 - struct iwl_priv *priv = priv(trans); 568 - struct iwl_trans_pcie *trans_pcie = 569 - IWL_TRANS_GET_PCIE_TRANS(trans); 571 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 570 572 571 - if (!priv->txq) { 573 + if (!trans_pcie->txq) { 572 574 ret = iwl_trans_tx_alloc(trans); 573 575 if (ret) 574 576 goto error; ··· 588 594 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) { 589 595 slots_num = (txq_id == trans->shrd->cmd_queue) ? 
590 596 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 591 - ret = iwl_trans_txq_init(trans, &priv->txq[txq_id], slots_num, 592 - txq_id); 597 + ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id], 598 + slots_num, txq_id); 593 599 if (ret) { 594 600 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 595 601 goto error; ··· 910 916 iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0); 911 917 912 918 /* make sure all queue are not stopped */ 913 - memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 919 + memset(&trans_pcie->queue_stopped[0], 0, 920 + sizeof(trans_pcie->queue_stopped)); 914 921 for (i = 0; i < 4; i++) 915 - atomic_set(&priv->queue_stop_count[i], 0); 922 + atomic_set(&trans_pcie->queue_stop_count[i], 0); 916 923 for_each_context(priv, ctx) 917 924 ctx->last_tx_rejected = false; 918 925 919 926 /* reset to 0 to enable all the queue first */ 920 - priv->txq_ctx_active_msk = 0; 927 + trans_pcie->txq_ctx_active_msk = 0; 921 928 922 929 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) < 923 930 IWLAGN_FIRST_AMPDU_QUEUE); ··· 929 934 int fifo = queue_to_fifo[i].fifo; 930 935 int ac = queue_to_fifo[i].ac; 931 936 932 - iwl_txq_ctx_activate(priv, i); 937 + iwl_txq_ctx_activate(trans_pcie, i); 933 938 934 939 if (fifo == IWL_TX_FIFO_UNUSED) 935 940 continue; 936 941 937 942 if (ac != IWL_AC_UNSET) 938 - iwl_set_swq_id(&priv->txq[i], ac, i); 939 - iwl_trans_tx_queue_set_status(trans, &priv->txq[i], fifo, 0); 943 + iwl_set_swq_id(&trans_pcie->txq[i], ac, i); 944 + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i], 945 + fifo, 0); 940 946 } 941 947 942 948 spin_unlock_irqrestore(&trans->shrd->lock, flags); ··· 954 958 { 955 959 int ch, txq_id; 956 960 unsigned long flags; 957 - struct iwl_priv *priv = priv(trans); 961 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 958 962 959 963 /* Turn off all Tx DMA fifos */ 960 964 spin_lock_irqsave(&trans->shrd->lock, flags); ··· 975 979 } 976 980 
spin_unlock_irqrestore(&trans->shrd->lock, flags); 977 981 978 - if (!priv->txq) { 982 + if (!trans_pcie->txq) { 979 983 IWL_WARN(trans, "Stopping tx queues that aren't allocated..."); 980 984 return 0; 981 985 } ··· 1104 1108 } 1105 1109 } 1106 1110 1107 - txq = &priv(trans)->txq[txq_id]; 1111 + txq = &trans_pcie->txq[txq_id]; 1108 1112 q = &txq->q; 1109 1113 1110 1114 /* Set up driver data for this TFD */ ··· 1264 1268 static int iwlagn_txq_check_empty(struct iwl_trans *trans, 1265 1269 int sta_id, u8 tid, int txq_id) 1266 1270 { 1267 - struct iwl_queue *q = &priv(trans)->txq[txq_id].q; 1271 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1272 + struct iwl_queue *q = &trans_pcie->txq[txq_id].q; 1268 1273 struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid]; 1269 1274 1270 1275 lockdep_assert_held(&trans->shrd->sta_lock); ··· 1283 1286 iwl_stop_tx_ba_trans_ready(priv(trans), 1284 1287 NUM_IWL_RXON_CTX, 1285 1288 sta_id, tid); 1286 - iwl_wake_queue(trans, &priv(trans)->txq[txq_id]); 1289 + iwl_wake_queue(trans, &trans_pcie->txq[txq_id]); 1287 1290 } 1288 1291 break; 1289 1292 case IWL_EMPTYING_HW_QUEUE_ADDBA: ··· 1321 1324 int txq_id, int ssn, u32 status, 1322 1325 struct sk_buff_head *skbs) 1323 1326 { 1324 - struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id]; 1327 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1328 + struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 1325 1329 /* n_bd is usually 256 => n_bd - 1 = 0xff */ 1326 1330 int tfd_num = ssn & (txq->q.n_bd - 1); 1327 1331 int freed = 0; 1328 1332 u8 agg_state; 1329 1333 bool cond; 1334 + 1335 + txq->time_stamp = jiffies; 1330 1336 1331 1337 if (txq->sched_retry) { 1332 1338 agg_state = ··· 1421 1421 txq_id = trans_pcie->ac_to_queue[ctx][ac]; 1422 1422 IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n", 1423 1423 ac, 1424 - (atomic_read(&priv(trans)->queue_stop_count[ac]) > 0) 1424 + (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0) 1425 
1425 ? "stopped" : "awake"); 1426 - iwl_wake_queue(trans, &priv(trans)->txq[txq_id]); 1426 + iwl_wake_queue(trans, &trans_pcie->txq[txq_id]); 1427 1427 } 1428 1428 } 1429 1429 ··· 1448 1448 1449 1449 static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id) 1450 1450 { 1451 - iwl_stop_queue(trans, &priv(trans)->txq[txq_id]); 1451 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1452 + 1453 + iwl_stop_queue(trans, &trans_pcie->txq[txq_id]); 1452 1454 } 1453 1455 1454 1456 #define IWL_FLUSH_WAIT_MS 2000 1455 1457 1456 1458 static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) 1457 1459 { 1460 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1458 1461 struct iwl_tx_queue *txq; 1459 1462 struct iwl_queue *q; 1460 1463 int cnt; ··· 1468 1465 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { 1469 1466 if (cnt == trans->shrd->cmd_queue) 1470 1467 continue; 1471 - txq = &priv(trans)->txq[cnt]; 1468 + txq = &trans_pcie->txq[cnt]; 1472 1469 q = &txq->q; 1473 1470 while (q->read_ptr != q->write_ptr && !time_after(jiffies, 1474 1471 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) ··· 1489 1486 */ 1490 1487 static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) 1491 1488 { 1492 - struct iwl_tx_queue *txq = &priv(trans)->txq[cnt]; 1489 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1490 + struct iwl_tx_queue *txq = &trans_pcie->txq[cnt]; 1493 1491 struct iwl_queue *q = &txq->q; 1494 1492 unsigned long timeout; 1495 1493 ··· 1582 1578 const u8 *ptr; 1583 1579 ssize_t ret; 1584 1580 1585 - if (!priv->txq) { 1581 + if (!trans_pcie->txq) { 1586 1582 IWL_ERR(trans, "txq not ready\n"); 1587 1583 return -EAGAIN; 1588 1584 } ··· 1593 1589 } 1594 1590 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); 1595 1591 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { 1596 - txq = &priv->txq[cnt]; 1592 + txq = &trans_pcie->txq[cnt]; 1597 1593 q = 
&txq->q; 1598 1594 pos += scnprintf(buf + pos, bufsz - pos, 1599 1595 "q[%d]: read_ptr: %u, write_ptr: %u\n", ··· 1670 1666 1671 1667 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, 1672 1668 char __user *user_buf, 1673 - size_t count, loff_t *ppos) { 1674 - 1669 + size_t count, loff_t *ppos) 1670 + { 1675 1671 struct iwl_trans *trans = file->private_data; 1672 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1676 1673 struct iwl_priv *priv = priv(trans); 1677 1674 struct iwl_tx_queue *txq; 1678 1675 struct iwl_queue *q; ··· 1683 1678 int ret; 1684 1679 const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num; 1685 1680 1686 - if (!priv->txq) { 1681 + if (!trans_pcie->txq) { 1687 1682 IWL_ERR(priv, "txq not ready\n"); 1688 1683 return -EAGAIN; 1689 1684 } ··· 1692 1687 return -ENOMEM; 1693 1688 1694 1689 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { 1695 - txq = &priv->txq[cnt]; 1690 + txq = &trans_pcie->txq[cnt]; 1696 1691 q = &txq->q; 1697 1692 pos += scnprintf(buf + pos, bufsz - pos, 1698 1693 "hwq %.2d: read=%u write=%u stop=%d" 1699 1694 " swq_id=%#.2x (ac %d/hwq %d)\n", 1700 1695 cnt, q->read_ptr, q->write_ptr, 1701 - !!test_bit(cnt, priv->queue_stopped), 1696 + !!test_bit(cnt, trans_pcie->queue_stopped), 1702 1697 txq->swq_id, txq->swq_id & 3, 1703 1698 (txq->swq_id >> 2) & 0x1f); 1704 1699 if (cnt >= 4) 1705 1700 continue; 1706 1701 /* for the ACs, display the stop count too */ 1707 1702 pos += scnprintf(buf + pos, bufsz - pos, 1708 - " stop-count: %d\n", 1709 - atomic_read(&priv->queue_stop_count[cnt])); 1703 + " stop-count: %d\n", 1704 + atomic_read(&trans_pcie->queue_stop_count[cnt])); 1710 1705 } 1711 1706 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1712 1707 kfree(buf);