/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pm_runtime.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "mvm/fw-api.h"

/*
 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
		if (!trans_pcie->txq[txq_id])
			continue;
		iwl_pcie_gen2_txq_unmap(trans, txq_id);
	}
}

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	int write_ptr = txq->write_ptr;
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
	 * If, for example, TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched to SRAM.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	scd_bc_tbl->tfd_offset[write_ptr] = bc_ent;
}

/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
					 struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

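/*
 * iwl_pcie_gen2_get_num_tbs - return the number of TBs used in a TFH TFD
 */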
static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

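/*
 * iwl_pcie_gen2_tfd_unmap - unmap the DMA mappings of a TFD's TBs
 *
 * The first TB is never unmapped - it holds the bidirectional DMA data.
 */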
static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}
}

static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct sk_buff *skb;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr));

	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

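/*
 * iwl_pcie_gen2_set_tb - add a TB (DMA address and length) to a TFH TFD
 *
 * Returns the index of the new TB, or an error if the TFD is already full.
 */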
static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
				u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

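/*
 * iwl_pcie_gen2_build_tfd - build a TFH TFD for an skb
 *
 * TB0 holds the first IWL_FIRST_TB_SIZE bytes of the TX command, TB1 the
 * rest of the TX command and the 802.11 header, TB2 the remainder of the
 * skb head, and any further TBs map the paged fragments.
 */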
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    struct iwl_device_cmd *dev_cmd,
					    struct sk_buff *skb,
					    struct iwl_cmd_meta *out_meta)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tfh_tfd *tfd =
		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
	dma_addr_t tb_phys;
	void *tb1_addr;
	int i, len, tb1_len, tb2_len, hdr_len;

	memset(tfd, 0, sizeof(*tfd));

	tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
	       IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
	      ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;

	tb1_len = ALIGN(len, 4);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));

		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len,
				  skb->len - hdr_len);

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

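/*
 * iwl_trans_pcie_gen2_tx - queue an skb for transmission
 *
 * Builds the TFD, fills the byte-count table entry and tells the hardware
 * the new write pointer.
 */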
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	struct iwl_tfh_tfd *tfd;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;

	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout)
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	if (iwl_queue_space(txq) < txq->high_mark)
		iwl_stop_queue(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, idx, cmd_pos;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd =
		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);

	memset(tfd, 0, sizeof(*tfd));

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

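/*
 * iwl_pcie_gen2_send_hcmd_sync - send a host command and wait for the
 * response, or until HOST_COMPLETE_TIMEOUT expires
 */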
static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

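/*
 * iwl_trans_pcie_gen2_send_hcmd - send a host command
 *
 * Commands are dropped while in RFKILL unless CMD_SEND_IN_RFKILL is set;
 * CMD_ASYNC commands are only enqueued, all others are sent synchronously.
 */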
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}

/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq_id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

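/*
 * iwl_pcie_gen2_txq_free_memory - free the TFD ring, first-TB buffers and
 * byte-count table backing a TX queue
 */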
static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
					  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
	kfree(txq);
}

/*
 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_gen2_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_pcie_gen2_txq_free_memory(trans, txq);

	trans_pcie->txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->queue_used);
}

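/*
 * iwl_trans_pcie_dyn_txq_alloc - allocate a TX queue and have the firmware
 * activate it via a TX queue configuration command
 *
 * Returns the queue id reported by the firmware, or a negative error code.
 */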
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	struct iwl_txq *txq;
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(*cmd) },
		.data = { cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret, qid;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     sizeof(struct iwlagn_scd_bc_tbl));
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return ret;
	}

	ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = le16_to_cpu(rsp->write_pointer);
	txq->write_ptr = le16_to_cpu(rsp->write_pointer);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (txq->write_ptr) | (qid << 16));
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(&hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(&hcmd);
error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}

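/*
 * iwl_trans_pcie_dyn_txq_free - deactivate a dynamically allocated TX queue:
 * unmap any remaining entries and mark the queue unused
 */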
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_pcie_gen2_txq_unmap(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

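/*
 * iwl_pcie_gen2_tx_free - free all allocated TX queues
 */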
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
		if (!trans_pcie->txq[i])
			continue;

		iwl_pcie_gen2_txq_free(trans, i);
	}
}

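/*
 * iwl_pcie_gen2_tx_init - allocate and initialize the command queue
 */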
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *cmd_queue;
	int txq_id = trans_pcie->cmd_queue, ret;

	/* alloc and init the command queue */
	if (!trans_pcie->txq[txq_id]) {
		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
		if (!cmd_queue) {
			IWL_ERR(trans, "Not enough memory for command queue\n");
			return -ENOMEM;
		}
		trans_pcie->txq[txq_id] = cmd_queue;
		ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		cmd_queue = trans_pcie->txq[txq_id];
	}

	ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans_pcie->txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans_pcie->queue_used);

	return 0;

error:
	iwl_pcie_gen2_tx_free(trans);
	return ret;
}
);