iwlwifi: mvm: move queue reconfiguration into new function

diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 8f929c774e70c9e3bf3a7c5a42bee2f36a83fb96..a36a631cdfa6964beba92e9f3835ea969ee2278c 100644
@@ -358,6 +358,108 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
        return ret;
 }
 
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
+                              int mac80211_queue, u8 tid, u8 flags)
+{
+       struct iwl_scd_txq_cfg_cmd cmd = {
+               .scd_queue = queue,
+               .action = SCD_CFG_DISABLE_QUEUE,
+       };
+       bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+       int ret;
+
+       if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+               return -EINVAL;
+
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               spin_lock_bh(&mvm->queue_info_lock);
+
+               if (remove_mac_queue)
+                       mvm->hw_queue_to_mac80211[queue] &=
+                               ~BIT(mac80211_queue);
+
+               spin_unlock_bh(&mvm->queue_info_lock);
+
+               iwl_trans_txq_free(mvm->trans, queue);
+
+               return 0;
+       }
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               return 0;
+       }
+
+       mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+       /*
+        * If there is another TID with the same AC - don't remove the MAC queue
+        * from the mapping
+        */
+       if (tid < IWL_MAX_TID_COUNT) {
+               unsigned long tid_bitmap =
+                       mvm->queue_info[queue].tid_bitmap;
+               int ac = tid_to_mac80211_ac[tid];
+               int i;
+
+               for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+                       if (tid_to_mac80211_ac[i] == ac)
+                               remove_mac_queue = false;
+               }
+       }
+
+       if (remove_mac_queue)
+               mvm->hw_queue_to_mac80211[queue] &=
+                       ~BIT(mac80211_queue);
+
+       cmd.action = mvm->queue_info[queue].tid_bitmap ?
+               SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+       if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+               mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+                           queue,
+                           mvm->queue_info[queue].tid_bitmap,
+                           mvm->hw_queue_to_mac80211[queue]);
+
+       /* If the queue is still enabled - nothing left to do in this func */
+       if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               return 0;
+       }
+
+       cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+       cmd.tid = mvm->queue_info[queue].txq_tid;
+
+       /* Make sure queue info is correct even though we overwrite it */
+       WARN(mvm->queue_info[queue].tid_bitmap ||
+            mvm->hw_queue_to_mac80211[queue],
+            "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
+            queue, mvm->hw_queue_to_mac80211[queue],
+            mvm->queue_info[queue].tid_bitmap);
+
+       /* If we are here - the queue is freed and we can zero out these vals */
+       mvm->queue_info[queue].tid_bitmap = 0;
+       mvm->hw_queue_to_mac80211[queue] = 0;
+
+       /* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
+       mvm->queue_info[queue].reserved = false;
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       iwl_trans_txq_disable(mvm->trans, queue, false);
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+                                  sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+       if (ret)
+               IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+                       queue, ret);
+       return ret;
+}
+
 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
 {
        struct ieee80211_sta *sta;
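
The AC-sharing rule in the TID loop of the new iwl_mvm_disable_txq() above is worth isolating. A minimal userspace sketch, using illustrative names and the standard 802.11 TID-to-AC table rather than the driver's types:

    #include <stdint.h>
    #include <stdio.h>

    enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_TIDS = 8 };

    /* TID -> access category, mirroring mac80211's tid_to_mac80211_ac */
    static const int tid_to_ac[NUM_TIDS] = {
            AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
    };

    /*
     * Return 1 if the mac80211 queue mapping for @tid may be dropped
     * once @tid leaves @tid_bitmap, i.e. no remaining TID on the
     * hardware queue maps to the same access category.
     */
    static int can_remove_mac_queue(uint16_t tid_bitmap, int tid)
    {
            int ac = tid_to_ac[tid], i;

            tid_bitmap &= ~(1u << tid);     /* the TID being removed */
            for (i = 0; i < NUM_TIDS; i++)
                    if ((tid_bitmap & (1u << i)) && tid_to_ac[i] == ac)
                            return 0;       /* another TID still needs this AC */
            return 1;
    }

    int main(void)
    {
            /* TIDs 4 and 5 are both AC_VI: removing TID 4 must keep
             * the AC_VI mac queue mapped for TID 5. */
            printf("%d\n", can_remove_mac_queue(0x30, 4));  /* 0 */
            printf("%d\n", can_remove_mac_queue(0x10, 4));  /* 1 */
            return 0;
    }
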
@@ -616,7 +718,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
        cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->hw_queue_to_mac80211[queue];
-       shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+       shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
        spin_unlock_bh(&mvm->queue_info_lock);
 
        IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
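
The changed line in this hunk replaces the old hw_queue_refcount with a population count of the TID bitmap; a later hunk turns the matching "in use" test into a plain non-zero check. A short sketch of the equivalence, using the GCC/Clang popcount builtin in place of the kernel's hweight16():

    #include <stdio.h>

    /* Was: hw_queue_refcount > 1. Shared == more than one TID bit set. */
    static int is_shared(unsigned tid_bitmap)
    {
            return __builtin_popcount(tid_bitmap) > 1;
    }

    int main(void)
    {
            printf("%d %d %d\n", is_shared(0x0), is_shared(0x8), is_shared(0x18));
            /* prints: 0 0 1 */
            return 0;
    }
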
@@ -674,6 +776,57 @@ out:
        return ret;
 }
 
+static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
+                                  u8 minq, u8 maxq)
+{
+       int i;
+
+       lockdep_assert_held(&mvm->queue_info_lock);
+
+       /* This should not be hit with new TX path */
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return -ENOSPC;
+
+       /* Start by looking for a free queue */
+       for (i = minq; i <= maxq; i++)
+               if (mvm->queue_info[i].tid_bitmap == 0 &&
+                   mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+                       return i;
+
+       return -ENOSPC;
+}
+
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+                                  u8 sta_id, u8 tid, unsigned int timeout)
+{
+       int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+
+       if (tid == IWL_MAX_TID_COUNT) {
+               tid = IWL_MGMT_TID;
+               size = IWL_MGMT_QUEUE_SIZE;
+       }
+       queue = iwl_trans_txq_alloc(mvm->trans,
+                                   cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+                                   sta_id, tid, SCD_QUEUE_CFG, size, timeout);
+
+       if (queue < 0) {
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+                                   sta_id, tid, queue);
+               return queue;
+       }
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+                           queue, sta_id, tid);
+
+       mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+                           queue, mvm->hw_queue_to_mac80211[queue]);
+
+       return queue;
+}
+
 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
                                        struct ieee80211_sta *sta, u8 ac,
                                        int tid)
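
The free-queue scan added above is a plain linear search. A self-contained sketch of its selection rule, with simplified types (the real status enum and error codes live in the driver):

    #include <stdio.h>

    enum q_status { Q_FREE, Q_RESERVED, Q_READY, Q_SHARED };

    struct q_info {
            unsigned tid_bitmap;
            enum q_status status;
    };

    /* First queue in [minq, maxq] that is both unused and FREE. */
    static int find_free_queue(const struct q_info *q, int minq, int maxq)
    {
            int i;

            for (i = minq; i <= maxq; i++)
                    if (q[i].tid_bitmap == 0 && q[i].status == Q_FREE)
                            return i;
            return -1;      /* stands in for -ENOSPC */
    }

    int main(void)
    {
            struct q_info q[4] = {
                    { 0x1, Q_READY }, { 0x0, Q_RESERVED },
                    { 0x0, Q_FREE },  { 0x2, Q_SHARED },
            };

            printf("%d\n", find_free_queue(q, 0, 3));       /* 2 */
            return 0;
    }

Note that a queue with an empty TID bitmap but a non-FREE status is deliberately skipped: reservation is tracked by status, not by the bitmap.
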
@@ -704,6 +857,92 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
        return 0;
 }
 
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+                                      int mac80211_queue, u8 sta_id, u8 tid)
+{
+       bool enable_queue = true;
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       /* Make sure this TID isn't already enabled */
+       if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+               spin_unlock_bh(&mvm->queue_info_lock);
+               IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+                       queue, tid);
+               return false;
+       }
+
+       /* Update mappings and refcounts */
+       if (mvm->queue_info[queue].tid_bitmap)
+               enable_queue = false;
+
+       if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+               WARN(mac80211_queue >=
+                    BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+                    "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+                    mac80211_queue, queue, sta_id, tid);
+               mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+       }
+
+       mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+       mvm->queue_info[queue].ra_sta_id = sta_id;
+
+       if (enable_queue) {
+               if (tid != IWL_MAX_TID_COUNT)
+                       mvm->queue_info[queue].mac80211_ac =
+                               tid_to_mac80211_ac[tid];
+               else
+                       mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+               mvm->queue_info[queue].txq_tid = tid;
+       }
+
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+                           queue, mvm->queue_info[queue].tid_bitmap,
+                           mvm->hw_queue_to_mac80211[queue]);
+
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       return enable_queue;
+}
+
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
+                              int mac80211_queue, u16 ssn,
+                              const struct iwl_trans_txq_scd_cfg *cfg,
+                              unsigned int wdg_timeout)
+{
+       struct iwl_scd_txq_cfg_cmd cmd = {
+               .scd_queue = queue,
+               .action = SCD_CFG_ENABLE_QUEUE,
+               .window = cfg->frame_limit,
+               .sta_id = cfg->sta_id,
+               .ssn = cpu_to_le16(ssn),
+               .tx_fifo = cfg->fifo,
+               .aggregate = cfg->aggregate,
+               .tid = cfg->tid,
+       };
+       bool inc_ssn;
+
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return false;
+
+       /* Send the enabling command if we need to */
+       if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+                                       cfg->sta_id, cfg->tid))
+               return false;
+
+       inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+                                          NULL, wdg_timeout);
+       if (inc_ssn)
+               le16_add_cpu(&cmd.ssn, 1);
+
+       WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+            "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+       return inc_ssn;
+}
+
 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta, u8 ac, int tid,
                                   struct ieee80211_hdr *hdr)
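
iwl_mvm_enable_txq() above touches the hardware only when iwl_mvm_update_txq_mapping() reports that this TID is the queue's first user; later TIDs just update the bookkeeping. A stripped-down model of that two-step flow (single queue, illustrative names):

    #include <stdio.h>

    static unsigned tid_bitmap;     /* the queue's TID membership */

    /* Returns 1 when this TID is the queue's first user. */
    static int update_txq_mapping(int tid)
    {
            int first_user = (tid_bitmap == 0);

            tid_bitmap |= 1u << tid;
            return first_user;
    }

    static void enable_txq(int queue, int tid)
    {
            if (!update_txq_mapping(tid))
                    return;         /* already running: mapping only */
            printf("send SCD_QUEUE_CFG enable for queue %d\n", queue);
    }

    int main(void)
    {
            enable_txq(5, 0);       /* first TID: configures hardware */
            enable_txq(5, 3);       /* second TID: bookkeeping only */
            return 0;
    }
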
@@ -1032,6 +1271,198 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
        spin_unlock_bh(&mvm->queue_info_lock);
 }
 
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive
+ * If only some of the queue TIDs are inactive - unmap them from the queue
+ */
+static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+                                        struct iwl_mvm_sta *mvmsta, int queue,
+                                        unsigned long tid_bitmap)
+{
+       int tid;
+
+       lockdep_assert_held(&mvmsta->lock);
+       lockdep_assert_held(&mvm->queue_info_lock);
+
+       if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+               return;
+
+       /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+       for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+               /* If some TFDs are still queued - don't mark TID as inactive */
+               if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+                       tid_bitmap &= ~BIT(tid);
+
+               /* Don't mark as inactive any TID that has an active BA */
+               if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+                       tid_bitmap &= ~BIT(tid);
+       }
+
+       /* If all TIDs in the queue are inactive - mark queue as inactive. */
+       if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+               mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+
+               for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
+                       mvmsta->tid_data[tid].is_tid_active = false;
+
+               IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
+                                   queue);
+               return;
+       }
+
+       /*
+        * If we are here, this is a shared queue and not all TIDs timed out.
+        * Remove the ones that did.
+        */
+       for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+               int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+               mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+               mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
+               mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+               mvmsta->tid_data[tid].is_tid_active = false;
+
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "Removing inactive TID %d from shared Q:%d\n",
+                                   tid, queue);
+       }
+
+       IWL_DEBUG_TX_QUEUES(mvm,
+                           "TXQ #%d left with tid bitmap 0x%x\n", queue,
+                           mvm->queue_info[queue].tid_bitmap);
+
+       /*
+        * There may be different TIDs with the same mac queues, so make
+        * sure all TIDs have existing corresponding mac queues enabled
+        */
+       tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+       for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+               mvm->hw_queue_to_mac80211[queue] |=
+                       BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+       }
+
+       /* If the queue is marked as shared - "unshare" it */
+       if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
+           mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+               mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+               IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+                                   queue);
+       }
+}
+
+static void iwl_mvm_reconfigure_queue(struct iwl_mvm *mvm, int queue)
+{
+       bool reconfig;
+       bool change_owner;
+
+       spin_lock_bh(&mvm->queue_info_lock);
+       reconfig = mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING;
+
+       /*
+        * We need to take into account a situation in which a TXQ was
+        * allocated to TID x, and then turned shared by adding TIDs y
+        * and z. If TID x becomes inactive and is removed from the TXQ,
+        * ownership must be given to one of the remaining TIDs.
+        * This is mainly because if TID x continues - a new queue can't
+        * be allocated for it as long as it is an owner of another TXQ.
+        */
+       change_owner = !(mvm->queue_info[queue].tid_bitmap &
+                        BIT(mvm->queue_info[queue].txq_tid)) &&
+                      (mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED);
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       if (reconfig)
+               iwl_mvm_unshare_queue(mvm, queue);
+       else if (change_owner)
+               iwl_mvm_change_queue_owner(mvm, queue);
+}
+
+static void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
+{
+       unsigned long now = jiffies;
+       int i;
+
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return;
+
+       spin_lock_bh(&mvm->queue_info_lock);
+
+       rcu_read_lock();
+
+       /* we skip the CMD queue below by starting at 1 */
+       BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
+
+       /*
+        * If a queue times out - mark it as INACTIVE (don't remove it right
+        * away if we don't have to). This is an optimization in case traffic
+        * comes later, and we don't HAVE to use a currently-inactive queue
+        */
+       for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
+               struct ieee80211_sta *sta;
+               struct iwl_mvm_sta *mvmsta;
+               u8 sta_id;
+               int tid;
+               unsigned long inactive_tid_bitmap = 0;
+               unsigned long queue_tid_bitmap;
+
+               queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+               if (!queue_tid_bitmap)
+                       continue;
+
+               /* If TXQ isn't in active use anyway - nothing to do here... */
+               if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+                   mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
+                       continue;
+
+               /* Check to see if there are inactive TIDs on this queue */
+               for_each_set_bit(tid, &queue_tid_bitmap,
+                                IWL_MAX_TID_COUNT + 1) {
+                       if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+                                      IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+                               continue;
+
+                       inactive_tid_bitmap |= BIT(tid);
+               }
+
+               /* If all TIDs are active - finish check on this queue */
+               if (!inactive_tid_bitmap)
+                       continue;
+
+               /*
+                * If we are here - the queue hasn't been served recently and is
+                * in use
+                */
+
+               sta_id = mvm->queue_info[i].ra_sta_id;
+               sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+               /*
+                * If the STA doesn't exist anymore, it isn't an error. It could
+                * be that it was removed since getting the queues, and in this
+                * case it should've inactivated its queues anyway.
+                */
+               if (IS_ERR_OR_NULL(sta))
+                       continue;
+
+               mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+               /* this isn't so nice, but works OK due to the way we loop */
+               spin_unlock(&mvm->queue_info_lock);
+
+               /* and we need this locking order */
+               spin_lock(&mvmsta->lock);
+               spin_lock(&mvm->queue_info_lock);
+               iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+                                            inactive_tid_bitmap);
+               /* only unlock sta lock - we still need the queue info lock */
+               spin_unlock(&mvmsta->lock);
+       }
+
+       rcu_read_unlock();
+       spin_unlock_bh(&mvm->queue_info_lock);
+}
+
 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
 {
        if (tid == IWL_MAX_TID_COUNT)
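
The per-TID timeout test inside the new iwl_mvm_inactivity_check() leans on the kernel's wrap-safe time_after(). A sketch of the comparison for a 32-bit counter, with a stand-in timeout value:

    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_TIMEOUT 500       /* stand-in for IWL_MVM_DQA_QUEUE_TIMEOUT */

    /* Kernel time_after(a, b): wrap-safe "a is later than b". */
    static int time_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(b - a) < 0;
    }

    /* A TID is inactive when its last frame predates the timeout window. */
    static int tid_inactive(uint32_t last_frame_time, uint32_t now)
    {
            return !time_after(last_frame_time + QUEUE_TIMEOUT, now);
    }

    int main(void)
    {
            printf("%d\n", tid_inactive(1000, 1100));       /* 0: recent */
            printf("%d\n", tid_inactive(1000, 2000));       /* 1: timed out */
            return 0;
    }
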
@@ -1100,7 +1531,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long deferred_tid_traffic;
-       int queue, sta_id, tid;
+       int sta_id, tid;
 
        /* Check inactivity of queues */
        iwl_mvm_inactivity_check(mvm);
@@ -1108,39 +1539,14 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
        mutex_lock(&mvm->mutex);
 
        /* No queue reconfiguration in TVQM mode */
-       if (iwl_mvm_has_new_tx_api(mvm))
-               goto alloc_queues;
-
-       /* Reconfigure queues requiring reconfiguration */
-       for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
-               bool reconfig;
-               bool change_owner;
-
-               spin_lock_bh(&mvm->queue_info_lock);
-               reconfig = (mvm->queue_info[queue].status ==
-                           IWL_MVM_QUEUE_RECONFIGURING);
-
-               /*
-                * We need to take into account a situation in which a TXQ was
-                * allocated to TID x, and then turned shared by adding TIDs y
-                * and z. If TID x becomes inactive and is removed from the TXQ,
-                * ownership must be given to one of the remaining TIDs.
-                * This is mainly because if TID x continues - a new queue can't
-                * be allocated for it as long as it is an owner of another TXQ.
-                */
-               change_owner = !(mvm->queue_info[queue].tid_bitmap &
-                                BIT(mvm->queue_info[queue].txq_tid)) &&
-                              (mvm->queue_info[queue].status ==
-                               IWL_MVM_QUEUE_SHARED);
-               spin_unlock_bh(&mvm->queue_info_lock);
+       if (!iwl_mvm_has_new_tx_api(mvm)) {
+               int queue;
 
-               if (reconfig)
-                       iwl_mvm_unshare_queue(mvm, queue);
-               else if (change_owner)
-                       iwl_mvm_change_queue_owner(mvm, queue);
+               /* Reconfigure queues requiring reconfiguration */
+               for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++)
+                       iwl_mvm_reconfigure_queue(mvm, queue);
        }
 
-alloc_queues:
        /* Go over all stations with deferred traffic */
        for_each_set_bit(sta_id, mvm->sta_deferred_frames,
                         IWL_MVM_STATION_COUNT) {
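
The loop above now delegates to iwl_mvm_reconfigure_queue(), which makes one of two mutually exclusive decisions per queue. A simplified model of that decision (types and names illustrative, not the driver's):

    #include <stdio.h>

    enum q_status { Q_READY, Q_SHARED, Q_RECONFIGURING };

    struct q_info {
            unsigned tid_bitmap;
            int txq_tid;            /* owning TID */
            enum q_status status;
    };

    static const char *reconfigure_action(const struct q_info *q)
    {
            if (q->status == Q_RECONFIGURING)
                    return "unshare";
            /* still shared, but the owner TID has left the queue */
            if (q->status == Q_SHARED && !(q->tid_bitmap & (1u << q->txq_tid)))
                    return "change owner";
            return "none";
    }

    int main(void)
    {
            struct q_info a = { 0x1, 0, Q_RECONFIGURING };
            struct q_info b = { 0x6, 0, Q_SHARED };  /* owner TID 0 gone */

            printf("%s, %s\n", reconfigure_action(&a), reconfigure_action(&b));
            /* prints: unshare, change owner */
            return 0;
    }
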
@@ -1183,7 +1589,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 
        /* Make sure we have free resources for this STA */
        if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
-           !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+           !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
            (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
             IWL_MVM_QUEUE_FREE))
                queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;