Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

wifi: iwlwifi: mvm: protect TXQ list manipulation

Some recent upstream debugging uncovered the fact that in
iwlwifi, the TXQ list manipulation is racy.

Introduce a new state bit for when the TXQ is completely
ready and can be used without locking, and if that's not
set yet acquire the lock to check everything correctly.

Reviewed-by: Benjamin Berg <benjamin.berg@intel.com>
Tested-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>

+39 -34
+14 -31
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 760 760 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 761 761 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 762 762 763 - /* 764 - * Please note that racing is handled very carefully here: 765 - * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is 766 - * deleted afterwards. 767 - * This means that if: 768 - * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): 769 - * queue is allocated and we can TX. 770 - * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): 771 - * a race, should defer the frame. 772 - * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): 773 - * need to allocate the queue and defer the frame. 774 - * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): 775 - * queue is already scheduled for allocation, no need to allocate, 776 - * should defer the frame. 777 - */ 778 - 779 - /* If the queue is allocated TX and return. */ 780 - if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { 781 - /* 782 - * Check that list is empty to avoid a race where txq_id is 783 - * already updated, but the queue allocation work wasn't 784 - * finished 785 - */ 786 - if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) 787 - return; 788 - 763 + if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) || 764 + !txq->sta) { 789 765 iwl_mvm_mac_itxq_xmit(hw, txq); 790 766 return; 791 767 } 792 768 793 - /* The list is being deleted only after the queue is fully allocated. */ 794 - if (!list_empty(&mvmtxq->list)) 795 - return; 769 + /* iwl_mvm_mac_itxq_xmit() will later be called by the worker 770 + * to handle any packets we leave on the txq now 771 + */ 796 772 797 - list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); 798 - schedule_work(&mvm->add_stream_wk); 773 + spin_lock_bh(&mvm->add_stream_lock); 774 + /* The list is being deleted only after the queue is fully allocated. */ 775 + if (list_empty(&mvmtxq->list) && 776 + /* recheck under lock */ 777 + !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) { 778 + list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); 779 + schedule_work(&mvm->add_stream_wk); 780 + } 781 + spin_unlock_bh(&mvm->add_stream_lock); 799 782 } 800 783 801 784 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
+2
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
··· 731 731 atomic_t tx_request; 732 732 #define IWL_MVM_TXQ_STATE_STOP_FULL 0 733 733 #define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1 734 + #define IWL_MVM_TXQ_STATE_READY 2 734 735 unsigned long state; 735 736 }; 736 737 ··· 830 829 struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; 831 830 }; 832 831 struct work_struct add_stream_wk; /* To add streams to queues */ 832 + spinlock_t add_stream_lock; 833 833 834 834 const char *nvm_file_name; 835 835 struct iwl_nvm_data *nvm_data;
+1
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 1195 1195 INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); 1196 1196 INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); 1197 1197 INIT_LIST_HEAD(&mvm->add_stream_txqs); 1198 + spin_lock_init(&mvm->add_stream_lock); 1198 1199 1199 1200 init_waitqueue_head(&mvm->rx_sync_waitq); 1200 1201
+22 -3
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 384 384 struct iwl_mvm_txq *mvmtxq = 385 385 iwl_mvm_txq_from_tid(sta, tid); 386 386 387 - mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; 387 + spin_lock_bh(&mvm->add_stream_lock); 388 388 list_del_init(&mvmtxq->list); 389 + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); 390 + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; 391 + spin_unlock_bh(&mvm->add_stream_lock); 389 392 } 390 393 391 394 /* Regardless if this is a reserved TXQ for a STA - mark it as false */ ··· 482 479 disable_agg_tids |= BIT(tid); 483 480 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; 484 481 485 - mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; 482 + spin_lock_bh(&mvm->add_stream_lock); 486 483 list_del_init(&mvmtxq->list); 484 + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); 485 + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; 486 + spin_unlock_bh(&mvm->add_stream_lock); 487 487 } 488 488 489 489 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ ··· 1450 1444 * a queue in the function itself. 1451 1445 */ 1452 1446 if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { 1447 + spin_lock_bh(&mvm->add_stream_lock); 1453 1448 list_del_init(&mvmtxq->list); 1449 + spin_unlock_bh(&mvm->add_stream_lock); 1454 1450 continue; 1455 1451 } 1456 1452 1457 - list_del_init(&mvmtxq->list); 1453 + /* now we're ready, any remaining races/concurrency will be 1454 + * handled in iwl_mvm_mac_itxq_xmit() 1455 + */ 1456 + set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); 1457 + 1458 1458 local_bh_disable(); 1459 + spin_lock(&mvm->add_stream_lock); 1460 + list_del_init(&mvmtxq->list); 1461 + spin_unlock(&mvm->add_stream_lock); 1462 + 1459 1463 iwl_mvm_mac_itxq_xmit(mvm->hw, txq); 1460 1464 local_bh_enable(); 1461 1465 } ··· 1880 1864 struct iwl_mvm_txq *mvmtxq = 1881 1865 iwl_mvm_txq_from_mac80211(sta->txq[i]); 1882 1866 1867 + spin_lock_bh(&mvm->add_stream_lock); 1883 1868 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; 1884 1869 list_del_init(&mvmtxq->list); 1870 + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); 1871 + spin_unlock_bh(&mvm->add_stream_lock); 1885 1872 } 1886 1873 1887 1874