next_reclaimed);
/*we can free until ssn % q.n_bd not inclusive */
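+ /* reclaim only fails on a bad queue mapping, warn if that ever happens here */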
- iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
- ssn, status, &skbs);
+ WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
+ ssn, status, &skbs));
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&skbs)) {
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
- * FIXME: this is a uCode bug which need to be addressed,
- * log the information and return for now!
- * since it is possible happen very often and in order
- * not to fill the syslog, don't enable the logging by default
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
+ ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
ba_resp->txed, ba_resp->txed_2_done);
- __skb_queue_head_init(&reclaimed_skbs);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
priv->shrd->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
- iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
- 0, &reclaimed_skbs);
+
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&reclaimed_skbs)) {
i, station->sta.sta.addr,
station->sta.station_flags_msk);
pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\trate_n_flags\n");
+ "TID\tseq_num\trate_n_flags\n");
for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
tid_data = &priv->shrd->tid_data[i][j];
pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%#x",
+ "%d:\t%#x\t%#x",
j, tid_data->seq_number,
- tid_data->agg.txq_id,
tid_data->agg.rate_n_flags);
if (tid_data->agg.wait_for_ba)
* Tx response (REPLY_TX), and the block ack notification
* (REPLY_COMPRESSED_BA).
* @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- * Needed by the upper layer for debugfs only.
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
* the first packet to be sent in legacy HW queue in Tx AGG stop flow.
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
struct iwl_ht_agg {
u32 rate_n_flags;
enum iwl_agg_state state;
- u16 txq_id;
u16 ssn;
bool wait_for_ba;
};
const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
u8 mcast_queue[NUM_IWL_RXON_CTX];
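+ /* map a station/TID pair to the Tx queue used for its BA session (0 = none) */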
+ u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
struct iwl_tx_queue *txq;
unsigned long txq_ctx_active_msk;
return -EINVAL;
}
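+/*
+ * A valid aggregation Tx queue id lies in
+ * [IWLAGN_FIRST_AMPDU_QUEUE, IWLAGN_FIRST_AMPDU_QUEUE + num_ampdu_queues)
+ */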
+static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
+{
+ if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
+ return false;
+ return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues);
+}
+
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id,
int tid, int frame_limit, u16 ssn)
return;
}
- txq_id = trans->shrd->tid_data[sta_id][tid].agg.txq_id;
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
+ if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
+ IWL_ERR(trans,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues - 1);
+ return;
+ }
ra_tid = BUILD_RAxTID(sta_id, tid);
return -ENXIO;
}
- trans->shrd->tid_data[sta_id][tid].agg.txq_id = txq_id;
+ trans_pcie->agg_txq[sta_id][tid] = txq_id;
iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
return 0;
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- /* TODO: the transport layer shouldn't access the tid_data */
- int txq_id = trans->shrd->tid_data[sta_id][tid].agg.txq_id;
+ u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- hw_params(trans).num_ampdu_queues <= txq_id)) {
+ if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
IWL_ERR(trans,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
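+ /* the queue is no longer used for aggregation, drop the mapping */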
+ trans_pcie->agg_txq[sta_id][tid] = 0;
trans_pcie->txq[txq_id].q.read_ptr = 0;
trans_pcie->txq[txq_id].q.write_ptr = 0;
/* supposes that ssn_idx is valid (!= 0xFFF) */
info->flags, tid_data->agg.state);
IWL_ERR(trans, "sta_id = %d, tid = %d "
"txq_id = %d, seq_num = %d", sta_id,
- tid, tid_data->agg.txq_id,
+ tid, trans_pcie->agg_txq[sta_id][tid],
SEQ_TO_SN(seq_number));
}
- txq_id = tid_data->agg.txq_id;
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
is_agg = true;
}
seq_number += 0x10;
return 0;
}
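+/*
+ * Returns 0 when the frames were reclaimed, non-zero when txq_id doesn't
+ * match the queue recorded for this sta/tid (see the uCode bug noted below).
+ */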
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
txq->time_stamp = jiffies;
+ if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+ txq_id != trans_pcie->agg_txq[sta_id][tid])) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed,
+ * log the information and return for now.
+ * Since it can possibly happen very often and in order
+ * not to fill the syslog, don't use IWL_ERR or IWL_WARN
+ */
+ IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
+ "agg_txq[sta_id[tid] %d", txq_id,
+ trans_pcie->agg_txq[sta_id][tid]);
+ return 1;
+ }
+
if (txq->q.read_ptr != tfd_num) {
IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
status != TX_STATUS_FAIL_PASSIVE_NO_RX))
iwl_wake_queue(trans, txq, "Packets reclaimed");
}
+ return 0;
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id);
- void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
+ int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs);
return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
}
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
+static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
int tid, int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
- trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
+ return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
+ status, skbs);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,