* @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
* probe requests.
* @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
};
/* The default calibrate table size if not specified by firmware file */
TXPATH_FLUSH = 0x1e,
MGMT_MCAST_KEY = 0x1f,
+ /* scheduler config */
+ SCD_QUEUE_CFG = 0x1d,
+
/* global key */
WEP_KEY = 0x20,
__le32 voltage;
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
+/**
+ * enum iwl_scd_control - scheduler config command control flags
+ * @IWL_SCD_CONTROL_RM_TID: remove TID from this queue
+ * @IWL_SCD_CONTROL_SET_SSN: use the SSN and program it into HW
+ */
+enum iwl_scd_control {
+ IWL_SCD_CONTROL_RM_TID = BIT(4),
+ IWL_SCD_CONTROL_SET_SSN = BIT(5),
+};
+
+/**
+ * enum iwl_scd_flags - scheduler config command flags
+ * @IWL_SCD_FLAGS_SHARE_TID: multiple TIDs map to this queue
+ * @IWL_SCD_FLAGS_SHARE_RA: multiple RAs map to this queue
+ * @IWL_SCD_FLAGS_DQA_ENABLED: DQA is enabled
+ */
+enum iwl_scd_flags {
+ IWL_SCD_FLAGS_SHARE_TID = BIT(0),
+ IWL_SCD_FLAGS_SHARE_RA = BIT(1),
+ IWL_SCD_FLAGS_DQA_ENABLED = BIT(2),
+};
+
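+/* sta_id value indicating that a queue is not bound to any station */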
+#define IWL_SCDQ_INVALID_STA 0xff
+
+/**
+ * struct iwl_scd_txq_cfg_cmd - TXQ hardware scheduler configuration command
+ * @token: ADDBA dialog token - unused, legacy field
+ * @sta_id: station ID (4 bits)
+ * @tid: TID, 0..7
+ * @scd_queue: TFD queue number, 0..31
+ * @enable: 1 to enable the queue, 0 to disable it
+ * @aggregate: 1 for an aggregation queue, 0 otherwise
+ * @tx_fifo: TX FIFO number, 0..7
+ * @window: scheduler window size (frame limit), up to 64 frames
+ * @ssn: starting sequence number (12 bits)
+ * @control: command control flags - see &enum iwl_scd_control
+ * @flags: command flags - see &enum iwl_scd_flags
+ *
+ * Note that every time the command is sent, all parameters must
+ * be filled, with the exception of:
+ * - @ssn, which is only used with %IWL_SCD_CONTROL_SET_SSN
+ * - @window, which is only relevant when starting aggregation
+ */
+struct iwl_scd_txq_cfg_cmd {
+ u8 token;
+ u8 sta_id;
+ u8 tid;
+ u8 scd_queue;
+ u8 enable;
+ u8 aggregate;
+ u8 tx_fifo;
+ u8 window;
+ __le16 ssn;
+ u8 control;
+ u8 flags;
+} __packed;
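+
+/* see iwl_mvm_enable_txq() and iwl_mvm_disable_txq() for how this
+ * command is filled in and sent
+ */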
+
#endif /* __fw_api_h__ */
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- iwl_trans_ac_txq_enable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_TX_FIFO_VO);
+ iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_TX_FIFO_VO);
break;
case NL80211_IFTYPE_AP:
- iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
- IWL_MVM_TX_FIFO_MCAST);
+ iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
+ IWL_MVM_TX_FIFO_MCAST);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- iwl_trans_ac_txq_enable(mvm->trans, vif->hw_queue[ac],
- iwl_mvm_ac_to_tx_fifo[ac]);
+ iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
+ iwl_mvm_ac_to_tx_fifo[ac]);
break;
}
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE,
- true);
+ iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE);
break;
case NL80211_IFTYPE_AP:
- iwl_trans_txq_disable(mvm->trans, vif->cab_queue, true);
+ iwl_mvm_disable_txq(mvm, vif->cab_queue);
/* fall through */
default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac],
- true);
+ iwl_mvm_disable_txq(mvm, vif->hw_queue[ac]);
}
}
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
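+
+/* Firmware supports dynamic queue allocation (DQA), which also
+ * implies support for the scheduler configuration command.
+ */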
+static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
+{
+ return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_DQA_SUPPORT;
+}
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
return mvmvif->low_latency;
}
+/* hw scheduler queue config */
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg);
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue);
+
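+/* Enable an AC queue: not aggregated, default frame limit, no
+ * specific TID (IWL_MAX_TID_COUNT).
+ */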
+static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
+ u8 fifo)
+{
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = fifo,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ iwl_mvm_enable_txq(mvm, queue, 0, &cfg);
+}
+
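+/* Enable an aggregation queue for the given station/TID; the BA
+ * window size becomes the frame limit and TX starts at ssn.
+ */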
+static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
+ int fifo, int sta_id, int tid,
+ int frame_limit, u16 ssn)
+{
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = fifo,
+ .sta_id = sta_id,
+ .tid = tid,
+ .frame_limit = frame_limit,
+ .aggregate = true,
+ };
+
+ iwl_mvm_enable_txq(mvm, queue, ssn, &cfg);
+}
+
/* Assoc status */
bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
CMD(BT_COEX_UPDATE_REDUCED_TXP),
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
CMD(ANTENNA_COUPLING_NOTIFICATION),
+ CMD(SCD_QUEUE_CFG),
};
#undef CMD
lockdep_assert_held(&mvm->mutex);
/* Map Aux queue to fifo - needs to happen before adding Aux station */
- iwl_trans_ac_txq_enable(mvm->trans, mvm->aux_queue,
- IWL_MVM_TX_FIFO_MCAST);
+ iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
+ IWL_MVM_TX_FIFO_MCAST);
/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
if (ret)
return -EIO;
- iwl_trans_txq_enable(mvm->trans, queue, fifo, mvmsta->sta_id, tid,
- buf_size, ssn);
+ iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
+ buf_size, ssn);
/*
* Even though in theory the peer could have different
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_trans_txq_disable(mvm->trans, txq_id, true);
+ iwl_mvm_disable_txq(mvm, txq_id);
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true);
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id);
}
mvm->queue_to_mac80211[tid_data->txq_id] =
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
- iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true);
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id);
tid_data->state = IWL_AGG_OFF;
/*
* we can't hold the mutex - but since we are after a sequence
- * point (call to iwl_trans_txq_disable), so we don't even need
+ * point (the call to iwl_mvm_disable_txq()), we don't even need
* a memory barrier.
*/
mvm->queue_to_mac80211[tid_data->txq_id] =
iwl_mvm_dump_umac_error_log(mvm);
}
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg)
+{
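+ /*
+  * With DQA, the firmware owns the scheduler configuration: send it
+  * the SCD_QUEUE_CFG command, and pass a NULL cfg to the transport
+  * below so that it only activates the queue. Without DQA, the
+  * transport programs the scheduler directly from cfg.
+  */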
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 1,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .flags = IWL_SCD_FLAGS_DQA_ENABLED,
+ .tid = cfg->tid,
+ .control = IWL_SCD_CONTROL_SET_SSN,
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "Failed to configure queue %d on FIFO %d (ret=%d)\n",
+ queue, cfg->fifo, ret);
+ }
+
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ iwl_mvm_is_dqa_supported(mvm) ? NULL : cfg);
+}
+
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue)
+{
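+ /*
+  * Stop the queue in the transport first. The transport only
+  * configures the scheduler itself in the non-DQA case; with DQA,
+  * the firmware tears the queue down via SCD_QUEUE_CFG instead.
+  */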
+ iwl_trans_txq_disable(mvm->trans, queue,
+ !iwl_mvm_is_dqa_supported(mvm));
+
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 0,
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
+ }
+}
+
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right