return -1;
}
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
+ int sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ tid_data = &priv->shrd->tid_data[sta_id][tid];
+
+ switch (priv->shrd->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(priv, "Stopping AGG while state not ON "
+ "or starting for %d on %d (%d)\n", sta_id, tid,
+ priv->shrd->tid_data[sta_id][tid].agg.state);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
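+	/* the SSN of the next frame for this TID; used below to check whether the HW queue is drained */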
+ tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ /* There are still packets for this RA / TID in the HW */
+ if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+ "next_recl = %d",
+ tid_data->agg.ssn,
+ tid_data->next_reclaimed);
+ priv->shrd->tid_data[sta_id][tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+ tid_data->agg.ssn);
+turn_off:
+ priv->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&priv->shrd->sta_lock);
+ spin_lock(&priv->shrd->lock);
+
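+	/* the transport layer tears down the aggregation HW queue */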
+ iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
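+	/* signal mac80211 that the BA session stop can complete */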
+ iwl_stop_tx_ba_trans_ready(priv, vif_priv->ctx->ctxid, sta_id, tid);
+
+ return 0;
+}
+
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
return ret;
}
-int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
-{
- int sta_id;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- sta_id = iwl_sta_id(sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
- sta_id, tid);
-}
-
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
const u8 *addr1)
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
return 0;
}
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
-
- iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
-
- trans_pcie->txq[txq_id].q.read_ptr = 0;
- trans_pcie->txq[txq_id].q.write_ptr = 0;
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl_trans_set_wr_ptrs(trans, txq_id, 0);
-
- iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(trans_pcie, txq_id);
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid)
-{
- struct iwl_tid_data *tid_data;
- unsigned long flags;
- int txq_id;
-
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
-
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- txq_id = tid_data->agg.txq_id;
+ /* TODO: the transport layer shouldn't access the tid_data */
+ int txq_id = trans->shrd->tid_data[sta_id][tid].agg.txq_id;
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
return -EINVAL;
}
- switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(trans, "Stopping AGG while state not ON "
- "or starting for %d on %d (%d)\n", sta_id, tid,
- trans->shrd->tid_data[sta_id][tid].agg.state);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
-
- /* There are still packets for this RA / TID in the HW */
- if (tid_data->agg.ssn != tid_data->next_reclaimed) {
- IWL_DEBUG_TX_QUEUES(trans, "Can't proceed: ssn %d, "
- "next_recl = %d",
- tid_data->agg.ssn,
- tid_data->next_reclaimed);
- trans->shrd->tid_data[sta_id][tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_TX_QUEUES(trans, "Can proceed: ssn = next_recl = %d",
- tid_data->agg.ssn);
-turn_off:
- trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&trans->shrd->sta_lock);
- spin_lock(&trans->shrd->lock);
-
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
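+	/* stop the scheduler from servicing this queue and drop it from the aggregation set */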
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
- iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+ trans_pcie->txq[txq_id].q.read_ptr = 0;
+ trans_pcie->txq[txq_id].q.write_ptr = 0;
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
+ iwl_trans_set_wr_ptrs(trans, txq_id, 0);
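+	/* mask the scheduler interrupt for this queue and mark it inactive */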
+ iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(trans_pcie, txq_id);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
return 0;
}
IWL_DEBUG_TX_QUEUES(trans,
"Can continue DELBA flow ssn = next_recl ="
" %d", tid_data->next_reclaimed);
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+ iwl_trans_pcie_tx_agg_disable(trans, sta_id, tid);
tid_data->agg.state = IWL_AGG_OFF;
iwl_stop_tx_ba_trans_ready(priv(trans),
NUM_IWL_RXON_CTX,
struct sk_buff_head *skbs);
int (*tx_agg_disable)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
int (*tx_agg_alloc)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id, int tid,
u16 *ssn);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
int sta_id, int tid)
{
- return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
+ return trans->ops->tx_agg_disable(trans, sta_id, tid);
}
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,