wifi: iwlwifi: mvm: protect TXQ list manipulation
author Johannes Berg <johannes.berg@intel.com>
Fri, 17 Mar 2023 09:53:25 +0000 (10:53 +0100)
committer Johannes Berg <johannes.berg@intel.com>
Wed, 22 Mar 2023 12:14:24 +0000 (13:14 +0100)
Recent upstream debugging uncovered that the TXQ list manipulation
in iwlwifi is racy.

Introduce a new state bit that marks a TXQ as completely ready to
be used without locking; if that bit is not yet set, acquire the
lock and re-check everything correctly.
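
A minimal sketch of the pattern this introduces (hypothetical
example_* names, not the driver code): the fast path transmits
locklessly once the queue is marked ready; the slow path re-checks
both the list and the bit under the lock before scheduling the
allocation work.

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define EXAMPLE_TXQ_READY	0	/* hypothetical state bit */

struct example_txq {
	struct list_head list;		/* link on dev->add_stream_txqs */
	unsigned long state;
};

struct example_dev {
	spinlock_t add_stream_lock;
	struct list_head add_stream_txqs;
	struct work_struct add_stream_wk;
};

/* stand-in for the real TX path, e.g. iwl_mvm_mac_itxq_xmit() */
static void example_xmit(struct example_dev *dev, struct example_txq *q);

static void example_wake_tx_queue(struct example_dev *dev,
				  struct example_txq *q)
{
	/* lockless fast path: the queue is fully set up, just transmit */
	if (likely(test_bit(EXAMPLE_TXQ_READY, &q->state))) {
		example_xmit(dev, q);
		return;
	}

	spin_lock_bh(&dev->add_stream_lock);
	/* recheck under the lock: the worker may have marked the queue
	 * ready (and unlinked it) between the test above and here
	 */
	if (list_empty(&q->list) &&
	    !test_bit(EXAMPLE_TXQ_READY, &q->state)) {
		list_add_tail(&q->list, &dev->add_stream_txqs);
		schedule_work(&dev->add_stream_wk);
	}
	spin_unlock_bh(&dev->add_stream_lock);
}

This is the classic double-checked pattern: the cheap test_bit()
avoids the lock in the common case, and the locked re-check closes
the window against the allocation worker.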

Reviewed-by: Benjamin Berg <benjamin.berg@intel.com>
Tested-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c

diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index f81c609ecf58deaa3201a9859ca2d77e230e5dd3..b55b1b17f4d197cf32775699087702bae2396bff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -760,42 +760,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
 
-       /*
-        * Please note that racing is handled very carefully here:
-        * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-        * deleted afterwards.
-        * This means that if:
-        * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-        *      queue is allocated and we can TX.
-        * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-        *      a race, should defer the frame.
-        * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-        *      need to allocate the queue and defer the frame.
-        * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-        *      queue is already scheduled for allocation, no need to allocate,
-        *      should defer the frame.
-        */
-
-       /* If the queue is allocated TX and return. */
-       if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-               /*
-                * Check that list is empty to avoid a race where txq_id is
-                * already updated, but the queue allocation work wasn't
-                * finished
-                */
-               if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-                       return;
-
+       if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+           !txq->sta) {
                iwl_mvm_mac_itxq_xmit(hw, txq);
                return;
        }
 
-       /* The list is being deleted only after the queue is fully allocated. */
-       if (!list_empty(&mvmtxq->list))
-               return;
+       /* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+        * to handle any packets we leave on the txq now
+        */
 
-       list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-       schedule_work(&mvm->add_stream_wk);
+       spin_lock_bh(&mvm->add_stream_lock);
+       /* The list is being deleted only after the queue is fully allocated. */
+       if (list_empty(&mvmtxq->list) &&
+           /* recheck under lock */
+           !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+               list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+               schedule_work(&mvm->add_stream_wk);
+       }
+       spin_unlock_bh(&mvm->add_stream_lock);
 }
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)          \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 421d2649b0f0cf15aa1042aea01db3cdb9afe958..f307c345dfa0b4b9b815daa717f51751d17cdf0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -731,6 +731,7 @@ struct iwl_mvm_txq {
        atomic_t tx_request;
 #define IWL_MVM_TXQ_STATE_STOP_FULL    0
 #define IWL_MVM_TXQ_STATE_STOP_REDIRECT        1
+#define IWL_MVM_TXQ_STATE_READY                2
        unsigned long state;
 };
 
@@ -829,6 +830,7 @@ struct iwl_mvm {
                struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
        };
        struct work_struct add_stream_wk; /* To add streams to queues */
+       spinlock_t add_stream_lock;
 
        const char *nvm_file_name;
        struct iwl_nvm_data *nvm_data;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index efad8f92d132114240c355c4b316a3ffa0ccc67e..9711841bb4564d33b8edaab378ced12db0a295b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1195,6 +1195,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
        INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
        INIT_LIST_HEAD(&mvm->add_stream_txqs);
+       spin_lock_init(&mvm->add_stream_lock);
 
        init_waitqueue_head(&mvm->rx_sync_waitq);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 21ad7b85c434c3e29d03098ca2ec453c108418f6..9caae77995ca933cdce204fac7663c51213f9389 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -384,8 +384,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);
 
-               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 
        /* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -479,8 +482,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
 
-               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 
        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -1444,12 +1450,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
                 * a queue in the function itself.
                 */
                if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+                       spin_lock_bh(&mvm->add_stream_lock);
                        list_del_init(&mvmtxq->list);
+                       spin_unlock_bh(&mvm->add_stream_lock);
                        continue;
                }
 
-               list_del_init(&mvmtxq->list);
+               /* now we're ready, any remaining races/concurrency will be
+                * handled in iwl_mvm_mac_itxq_xmit()
+                */
+               set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
                local_bh_disable();
+               spin_lock(&mvm->add_stream_lock);
+               list_del_init(&mvmtxq->list);
+               spin_unlock(&mvm->add_stream_lock);
+
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
                local_bh_enable();
        }
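
As a hedged aside, reusing the hypothetical example_* types from the
sketch above: the ordering in this hunk matters. READY is published
before the queue is unlinked under the lock, so a concurrent wake
either sees READY and transmits directly, or still finds the queue
on the list when it re-checks under the lock; in both cases no frame
is stranded and the queue is not queued for allocation twice.

/* worker side of the handoff (sketch, example_* names are assumed) */
static void example_make_ready(struct example_dev *dev,
			       struct example_txq *q)
{
	/* publish READY first: a racing wake that observes it can
	 * transmit without taking the lock
	 */
	set_bit(EXAMPLE_TXQ_READY, &q->state);

	local_bh_disable();
	/* a wake that missed READY re-checks under this lock and
	 * finds the bit set, so it does not re-add the queue
	 */
	spin_lock(&dev->add_stream_lock);
	list_del_init(&q->list);
	spin_unlock(&dev->add_stream_lock);

	example_xmit(dev, q);	/* flush frames deferred meanwhile */
	local_bh_enable();
}
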
@@ -1864,8 +1880,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_mac80211(sta->txq[i]);
 
+               spin_lock_bh(&mvm->add_stream_lock);
                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 }
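
Finally, one way to read the teardown hunks above (again with the
hypothetical example_* names): unlinking the queue, clearing READY
and invalidating the queue id all happen under the same lock, so a
wake that takes the slow path never observes a half-torn-down queue.

/* teardown side (sketch, example_* names are assumed) */
static void example_disable_txq(struct example_dev *dev,
				struct example_txq *q)
{
	spin_lock_bh(&dev->add_stream_lock);
	list_del_init(&q->list);		 /* cancel pending allocation */
	clear_bit(EXAMPLE_TXQ_READY, &q->state); /* force the slow path again */
	spin_unlock_bh(&dev->add_stream_lock);
}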