iwlwifi: mvm: support new flush API
authorMordechai Goodstein <mordechay.goodstein@intel.com>
Wed, 10 May 2017 13:42:53 +0000 (16:42 +0300)
committerLuca Coelho <luciano.coelho@intel.com>
Fri, 23 Jun 2017 09:19:33 +0000 (12:19 +0300)
This new API allows flushing queues based on station ID and TID in A000
devices.  One reason for using this is that tfd_queue_mask is only good
for 32 queues, which is not enough for A000 devices.

Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Mordechai Goodstein <mordechay.goodstein@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c

index 744dc069ff23a9dbcf4fb61416da2dd1766e5064..c2a1aeef74eceaebf56d5ede512649964a9d1553 100644 (file)
@@ -119,19 +119,30 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
                                        size_t count, loff_t *ppos)
 {
        int ret;
-       u32 scd_q_msk;
+       u32 flush_arg;
 
        if (!iwl_mvm_firmware_running(mvm) ||
            mvm->cur_ucode != IWL_UCODE_REGULAR)
                return -EIO;
 
-       if (sscanf(buf, "%x", &scd_q_msk) != 1)
+       if (kstrtou32(buf, 0, &flush_arg))
                return -EINVAL;
 
-       IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "FLUSHING all tids queues on sta_id = %d\n",
+                                   flush_arg);
+               mutex_lock(&mvm->mutex);
+               ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ? : count;
+               mutex_unlock(&mvm->mutex);
+               return ret;
+       }
+
+       IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n",
+                           flush_arg);
 
        mutex_lock(&mvm->mutex);
-       ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
+       ret =  iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count;
        mutex_unlock(&mvm->mutex);
 
        return ret;
index 0562ce4062492dd007b84e21cee21ba02fa33f77..a37c584b0b48562218ac6e9c8b2c1516afcd0681 100644 (file)
@@ -846,12 +846,24 @@ enum iwl_dump_control {
  * @flush_ctl: control flags
  * @reserved: reserved
  */
-struct iwl_tx_path_flush_cmd {
+struct iwl_tx_path_flush_cmd_v1 {
        __le32 queues_ctl;
        __le16 flush_ctl;
        __le16 reserved;
 } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
 
+/**
+ * struct iwl_tx_path_flush_cmd - queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+       __le32 sta_id;
+       __le16 tid_mask;
+       __le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
 /* Available options for the SCD_QUEUE_CFG HCMD */
 enum iwl_scd_cfg_actions {
        SCD_CFG_DISABLE_QUEUE           = 0x0,
index 7171928872de57991671b85692fbc8aa7dab32fe..3b1f15873034a5b594d135d2f34c460e504362a5 100644 (file)
@@ -1385,6 +1385,8 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 #endif
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
 int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+                          u16 tids, u32 flags);
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
index fc2c607013fb250bf0909a2b193e2851802d4957..2e4bfe9f07ecb2fe0b25dbaaad65f89ad5f83afc 100644 (file)
@@ -1477,9 +1477,15 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
        synchronize_net();
 
        /* Flush the hw queues, in case something got queued during entry */
-       ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm), flags);
-       if (ret)
-               return ret;
+       /* TODO new tx api */
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
+       } else {
+               ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
+                                           flags);
+               if (ret)
+                       return ret;
+       }
 
        /* configure wowlan configuration only if needed */
        if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
index aa41ee8ed9163b7538dfe391dd5f8c68dd1c720e..02f35a929606138d16817e79804ea2ecb317e678 100644 (file)
@@ -734,7 +734,6 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
        spin_lock_bh(&mvmsta->lock);
        mvmsta->tid_data[tid].txq_id = queue;
        mvmsta->tid_data[tid].is_tid_active = true;
-       mvmsta->tfd_queue_msk |= BIT(queue);
        spin_unlock_bh(&mvmsta->lock);
 
        return 0;
@@ -2004,8 +2003,6 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                        mvm->probe_queue = queue;
                else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
                        mvm->p2p_dev_queue = queue;
-
-               bsta->tfd_queue_msk |= BIT(queue);
        }
 
        return 0;
@@ -2015,29 +2012,32 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int queue;
 
        lockdep_assert_held(&mvm->mutex);
 
        iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
 
-       if (vif->type == NL80211_IFTYPE_AP ||
-           vif->type == NL80211_IFTYPE_ADHOC)
-               iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
-                                   IWL_MAX_TID_COUNT, 0);
-
-       if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
-               iwl_mvm_disable_txq(mvm, mvm->probe_queue,
-                                   vif->hw_queue[0], IWL_MAX_TID_COUNT,
-                                   0);
-               mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
+       switch (vif->type) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
+               queue = mvm->probe_queue;
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               queue = mvm->p2p_dev_queue;
+               break;
+       default:
+               WARN(1, "Can't free bcast queue on vif type %d\n",
+                    vif->type);
+               return;
        }
 
-       if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
-               iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
-                                   vif->hw_queue[0], IWL_MAX_TID_COUNT,
-                                   0);
-               mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
-       }
+       iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return;
+
+       WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
+       mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
 }
 
 /* Send the FW a request to remove the station from it's internal data
@@ -2913,14 +2913,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        if (old_state >= IWL_AGG_ON) {
                iwl_mvm_drain_sta(mvm, mvmsta, true);
-               if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
-                       IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
 
-               if (iwl_mvm_has_new_tx_api(mvm))
+               if (iwl_mvm_has_new_tx_api(mvm)) {
+                       if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
+                                                  BIT(tid), 0))
+                               IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
                        iwl_trans_wait_txq_empty(mvm->trans, txq_id);
-
-               else
+               } else {
+                       if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
+                               IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
                        iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
+               }
 
                iwl_mvm_drain_sta(mvm, mvmsta, false);
 
index 02f293b711fdce9b4afb1f1308bbb5ca8817f2f6..157a75394763af786706df0c56f2bd223202f835 100644 (file)
@@ -1902,11 +1902,13 @@ out:
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
 {
        int ret;
-       struct iwl_tx_path_flush_cmd flush_cmd = {
+       struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
                .queues_ctl = cpu_to_le32(tfd_msk),
                .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
        };
 
+       WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
        ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
                                   sizeof(flush_cmd), &flush_cmd);
        if (ret)
@@ -1914,19 +1916,41 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
        return ret;
 }
 
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+                          u16 tids, u32 flags)
 {
-       u32 mask;
+       int ret;
+       struct iwl_tx_path_flush_cmd flush_cmd = {
+               .sta_id = cpu_to_le32(sta_id),
+               .tid_mask = cpu_to_le16(tids),
+       };
 
-       if (internal) {
-               struct iwl_mvm_int_sta *int_sta = sta;
+       WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
 
-               mask = int_sta->tfd_queue_msk;
-       } else {
-               struct iwl_mvm_sta *mvm_sta = sta;
+       ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+                                  sizeof(flush_cmd), &flush_cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+       return ret;
+}
 
-               mask = mvm_sta->tfd_queue_msk;
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+{
+       struct iwl_mvm_int_sta *int_sta = sta;
+       struct iwl_mvm_sta *mvm_sta = sta;
+
+       if (iwl_mvm_has_new_tx_api(mvm)) {
+               if (internal)
+                       return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id,
+                                                     BIT(IWL_MGMT_TID), flags);
+
+               return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
+                                             0xFF, flags);
        }
 
-       return iwl_mvm_flush_tx_path(mvm, mask, flags);
+       if (internal)
+               return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
+                                            flags);
+
+       return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
 }