u32 tbs;
};
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
- * the same struct) have 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
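/* These become n_window: 256 SW slots for data queues, 32 for the command queue. */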
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @write_ptr: first empty entry (index), host_w
+ * @read_ptr: last used entry (index), host_r
+ * @dma_addr: physical addr of the BDs
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space is more than this
+ * @high_mark: high watermark, stop queue if free space is less than this
*
* A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
+ * (barring future HW changes). For the normal TX queues, n_window, which is
+ * the size of the software queue data, is also 256; however, for the command
+ * queue, n_window is only 32 since we don't need that many commands pending.
+ * Since the HW still uses 256 BDs for DMA, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlaid onto the HW queue.
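+ * For example, a command at HW index 100 sits in SW entry
+ * 100 & 31 == 4 (the N == 3 window), which is exactly what
+ * get_cmd_index() computes.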
*/
struct iwl_txq {
- struct iwl_queue q;
void *tfds;
struct iwl_pcie_first_tb_buf *first_tb_bufs;
dma_addr_t first_tb_dma;
bool block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
+
+ int write_ptr;
+ int read_ptr;
+ dma_addr_t dma_addr;
+ int n_window;
+ u32 id;
+ int low_mark;
+ int high_mark;
};
static inline dma_addr_t
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+ if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+ if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->q.id);
+ txq->id);
}
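/*
 * iwl_queue_used - check whether TFD index @i is in the in-use range.
 * E.g. read_ptr == 250 and write_ptr == 5 means the ring has wrapped:
 * entries 250..255 and 0..4 are used, while 5..249 are free.
 */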
-static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
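/*
 * get_cmd_index - fold a HW ring index into the SW window. Relies on
 * n_window being a power of two; e.g. HW index 100 on the 32-entry
 * command queue maps to SW entry 100 & 31 == 4.
 */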
-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
*
***************************************************/
-static int iwl_queue_space(const struct iwl_queue *q)
+static int iwl_queue_space(const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
q->n_window = slots_num;
q->id = id;
spin_lock(&txq->lock);
/* check if triggered erroneously */
- if (txq->q.read_ptr == txq->q.write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_unlock(&txq->lock);
return;
}
spin_unlock(&txq->lock);
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
jiffies_to_msecs(txq->wd_timeout));
iwl_trans_pcie_log_scd_error(trans, txq);
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *) txq->entries[txq->q.write_ptr].cmd->payload;
+ (void *)txq->entries[txq->write_ptr].cmd->payload;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
+ int txq_id = txq->id;
+ int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+ (void *)txq->entries[read_ptr].cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
- int txq_id = txq->q.id;
+ int txq_id = txq->id;
lockdep_assert_held(&txq->lock);
* if not in power-save mode, uCode will never sleep when we're
* trying to tx (during RFKILL, we're not trying to tx).
*/
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
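	/*
	 * HBUS_TARG_WRPTR takes the TFD index in its low byte and the
	 * queue id shifted above it, e.g. write_ptr 0x20 on txq_id 4
	 * yields 0x420.
	 */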
if (!txq->block)
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ txq->write_ptr | (txq_id << 8));
}
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
- int rd_ptr = txq->q.read_ptr;
- int idx = get_cmd_index(&txq->q, rd_ptr);
+ int rd_ptr = txq->read_ptr;
+ int idx = get_cmd_index(txq, rd_ptr);
lockdep_assert_held(&txq->lock);
dma_addr_t addr, u16 len, bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_queue *q;
void *tfd;
u32 num_tbs;
- q = &txq->q;
- tfd = txq->tfds + trans_pcie->tfd_size * q->write_ptr;
+ tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
if (reset)
memset(tfd, 0, trans_pcie->tfd_size);
(unsigned long)txq);
txq->trans_pcie = trans_pcie;
- txq->q.n_window = slots_num;
+ txq->n_window = slots_num;
txq->entries = kcalloc(slots_num,
sizeof(struct iwl_pcie_txq_entry),
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->q.dma_addr, GFP_KERNEL);
+ &txq->dma_addr, GFP_KERNEL);
if (!txq->tfds)
goto error;
if (!txq->first_tb_bufs)
goto err_free_tfds;
- txq->q.id = txq_id;
+ txq->id = txq_id;
return 0;
err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
if (txq->entries && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
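	/* n_window (TFD_TX_CMD_SLOTS or TFD_CMD_SLOTS) is likewise a
	 * power of two, which get_cmd_index() relies on */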
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+ ret = iwl_queue_init(txq, slots_num, txq_id);
if (ret)
return ret;
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
return 0;
}
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
spin_lock_bh(&txq->lock);
- while (q->write_ptr != q->read_ptr) {
+ while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, q->read_ptr);
+ txq_id, txq->read_ptr);
if (txq_id != trans_pcie->cmd_queue) {
- struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
if (txq_id != trans_pcie->cmd_queue) {
IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
- q->id);
+ txq->id);
iwl_trans_unref(trans);
} else {
iwl_pcie_clear_cmd_in_flight(trans);
/* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++) {
+ for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
}
if (txq->tfds) {
dma_free_coherent(dev,
trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
- txq->tfds, txq->q.dma_addr);
- txq->q.dma_addr = 0;
+ txq->tfds, txq->dma_addr);
+ txq->dma_addr = 0;
txq->tfds = NULL;
dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
txq->first_tb_bufs, txq->first_tb_dma);
}
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
iwl_pcie_txq_unmap(trans, txq_id);
- txq->q.read_ptr = 0;
- txq->q.write_ptr = 0;
+ txq->read_ptr = 0;
+ txq->write_ptr = 0;
}
/* Tell NIC where to find the "keep warm" buffer */
* if empty, delete the timer; otherwise move it forward
* since we're making progress on this queue
*/
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
del_timer(&txq->stuck_timer);
else
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
- struct iwl_queue *q = &txq->q;
int last_to_free;
/* This function is not meant to release the cmd queue */
goto out;
}
- if (txq->q.read_ptr == tfd_num)
+ if (txq->read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->q.read_ptr, tfd_num, ssn);
+ txq_id, txq->read_ptr, tfd_num, ssn);
/* Since we free until index _not_ inclusive, the one before index is
 * the last we will free. That entry must still be in use */
last_to_free = iwl_queue_dec_wrap(tfd_num);
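	/* e.g. read_ptr == 10, tfd_num == 15: entries 10..14 get freed,
	 * and last_to_free == 14 must itself be a used entry */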
- if (!iwl_queue_used(q, last_to_free)) {
+ if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
goto out;
}
goto out;
for (;
- q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
- struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
+ txq->read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
__skb_queue_tail(skbs, skb);
- txq->entries[txq->q.read_ptr].skb = NULL;
+ txq->entries[txq->read_ptr].skb = NULL;
if (!trans->cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ if (iwl_queue_space(txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ if (iwl_queue_space(txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
- if (q->read_ptr == q->write_ptr) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+ if (txq->read_ptr == txq->write_ptr) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
iwl_trans_unref(trans);
}
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, q->write_ptr, q->read_ptr);
+ idx, txq->write_ptr, txq->read_ptr);
iwl_force_nmi(trans);
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
*/
iwl_scd_txq_disable_agg(trans, txq_id);
- ssn = txq->q.read_ptr;
+ ssn = txq->read_ptr;
}
}
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- txq->q.read_ptr = (ssn & 0xff);
- txq->q.write_ptr = (ssn & 0xff);
+ txq->read_ptr = (ssn & 0xff);
+ txq->write_ptr = (ssn & 0xff);
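	/* e.g. ssn 0x123 places both pointers at entry 0x123 & 0xff == 0x23 */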
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
goto free_dup_buf;
}
- idx = get_cmd_index(q, q->write_ptr);
+ idx = get_cmd_index(txq, txq->write_ptr);
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
copy_size = sizeof(struct iwl_cmd_header_wide);
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
cmd_pos = sizeof(struct iwl_cmd_header);
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
}
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
spin_lock_bh(&txq->lock);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
HOST_COMPLETE_TIMEOUT);
if (!ret) {
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
+ txq->read_ptr, txq->write_ptr);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_queue *q = &txq->q;
u16 tb2_len;
int i;
skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, q->write_ptr),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- struct iwl_queue *q = &txq->q;
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, q->write_ptr),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
NULL, 0);
return 0;
out_unmap:
- iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
return ret;
}
#else /* CONFIG_INET */
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
- struct iwl_queue *q;
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
void *tfd;
bool amsdu;
txq = &trans_pcie->txq[txq_id];
- q = &txq->q;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
spin_lock(&txq->lock);
- if (iwl_queue_space(q) < q->high_mark) {
+ if (iwl_queue_space(txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(q) < 3)) {
+ if (unlikely(iwl_queue_space(txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE(txq->ampdu &&
- (wifi_seq & 0xff) != q->write_ptr,
+ (wifi_seq & 0xff) != txq->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, q->write_ptr);
+ txq_id, wifi_seq, txq->write_ptr);
/* Set up driver data for this TFD */
- txq->entries[q->write_ptr].skb = skb;
- txq->entries[q->write_ptr].cmd = dev_cmd;
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
+ INDEX_TO_SEQ(txq->write_ptr)));
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
+ tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
/* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[q->write_ptr].meta;
+ out_meta = &txq->entries[txq->write_ptr].meta;
out_meta->flags = 0;
/*
}
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_FIRST_TB_SIZE, true);
goto out_err;
}
- tfd = iwl_pcie_get_tfd(trans_pcie, txq, q->write_ptr);
+ tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
iwl_pcie_tfd_get_num_tbs(trans, tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
if (txq->wd_timeout) {
/*
* If the TXQ is active, then set the timer, if not,
else
txq->frozen_expiry_remainder = txq->wd_timeout;
}
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
iwl_trans_ref(trans);
}
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);