Commit | Line | Data |
---|---|---|
1053d35f RR |
1 | /****************************************************************************** |
2 | * | |
01f8162a | 3 | * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. |
1053d35f RR |
4 | * |
5 | * Portions of this file are derived from the ipw3945 project, as well | |
6 | * as portions of the ieee80211 subsystem header files. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of version 2 of the GNU General Public License as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution in the | |
22 | * file called LICENSE. | |
23 | * | |
24 | * Contact Information: | |
759ef89f | 25 | * Intel Linux Wireless <ilw@linux.intel.com> |
1053d35f RR |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
27 | * | |
28 | *****************************************************************************/ | |
29 | ||
fd4abac5 | 30 | #include <linux/etherdevice.h> |
1053d35f RR |
31 | #include <net/mac80211.h> |
32 | #include "iwl-eeprom.h" | |
33 | #include "iwl-dev.h" | |
34 | #include "iwl-core.h" | |
35 | #include "iwl-sta.h" | |
36 | #include "iwl-io.h" | |
37 | #include "iwl-helpers.h" | |
38 | ||
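/* Default TID -> TX FIFO mapping; consulted by the aggregation start/stop
 * paths (iwl_tx_agg_start/iwl_tx_agg_stop) and iwl_txq_check_empty() below. */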
30e553e3 TW |
39 | static const u16 default_tid_to_tx_fifo[] = { |
40 | IWL_TX_FIFO_AC1, | |
41 | IWL_TX_FIFO_AC0, | |
42 | IWL_TX_FIFO_AC0, | |
43 | IWL_TX_FIFO_AC1, | |
44 | IWL_TX_FIFO_AC2, | |
45 | IWL_TX_FIFO_AC2, | |
46 | IWL_TX_FIFO_AC3, | |
47 | IWL_TX_FIFO_AC3, | |
48 | IWL_TX_FIFO_NONE, | |
49 | IWL_TX_FIFO_NONE, | |
50 | IWL_TX_FIFO_NONE, | |
51 | IWL_TX_FIFO_NONE, | |
52 | IWL_TX_FIFO_NONE, | |
53 | IWL_TX_FIFO_NONE, | |
54 | IWL_TX_FIFO_NONE, | |
55 | IWL_TX_FIFO_NONE, | |
56 | IWL_TX_FIFO_AC3 | |
57 | }; | |
58 | ||
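/* Helpers for allocating and freeing a coherent DMA buffer tracked by an
 * iwl_dma_ptr (virtual address, bus address and size). */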
4ddbb7d0 TW |
59 | static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv, |
60 | struct iwl_dma_ptr *ptr, size_t size) | |
61 | { | |
62 | ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma); | |
63 | if (!ptr->addr) | |
64 | return -ENOMEM; | |
65 | ptr->size = size; | |
66 | return 0; | |
67 | } | |
68 | ||
69 | static inline void iwl_free_dma_ptr(struct iwl_priv *priv, | |
70 | struct iwl_dma_ptr *ptr) | |
71 | { | |
72 | if (unlikely(!ptr->addr)) | |
73 | return; | |
74 | ||
75 | pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma); | |
76 | memset(ptr, 0, sizeof(*ptr)); | |
77 | } | |
78 | ||
fd4abac5 TW |
79 | /** |
80 | * iwl_txq_update_write_ptr - Send new write index to hardware | |
81 | */ | |
82 | int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) | |
83 | { | |
84 | u32 reg = 0; | |
85 | int ret = 0; | |
86 | int txq_id = txq->q.id; | |
87 | ||
88 | if (txq->need_update == 0) | |
89 | return ret; | |
90 | ||
91 | /* if we're trying to save power */ | |
92 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | |
93 | /* wake up nic if it's powered down ... | |
94 | * uCode will wake up, and interrupt us again, so next | |
95 | * time we'll skip this part. */ | |
96 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | |
97 | ||
98 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | |
e1623446 | 99 | IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg); |
fd4abac5 TW |
100 | iwl_set_bit(priv, CSR_GP_CNTRL, |
101 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | |
102 | return ret; | |
103 | } | |
104 | ||
fd4abac5 TW |
105 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, |
106 | txq->q.write_ptr | (txq_id << 8)); | |
fd4abac5 TW |
107 | |
108 | /* else not in power-save mode, uCode will never sleep when we're | |
109 | * trying to tx (during RFKILL, we're not trying to tx). */ | |
110 | } else | |
111 | iwl_write32(priv, HBUS_TARG_WRPTR, | |
112 | txq->q.write_ptr | (txq_id << 8)); | |
113 | ||
114 | txq->need_update = 0; | |
115 | ||
116 | return ret; | |
117 | } | |
118 | EXPORT_SYMBOL(iwl_txq_update_write_ptr); | |
119 | ||
120 | ||
1053d35f RR |
121 | /** |
122 | * iwl_tx_queue_free - Deallocate DMA queue. | |
123 | * @txq: Transmit queue to deallocate. | |
124 | * | |
125 | * Empty queue by removing and destroying all BD's. | |
126 | * Free all buffers. | |
127 | * 0-fill, but do not free "txq" descriptor structure. | |
128 | */ | |
a8e74e27 | 129 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) |
1053d35f | 130 | { |
da99c4b6 | 131 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; |
443cfd45 | 132 | struct iwl_queue *q = &txq->q; |
1053d35f | 133 | struct pci_dev *dev = priv->pci_dev; |
71c55d90 | 134 | int i; |
1053d35f RR |
135 | |
136 | if (q->n_bd == 0) | |
137 | return; | |
138 | ||
139 | /* first, empty all BD's */ | |
140 | for (; q->write_ptr != q->read_ptr; | |
141 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) | |
7aaa1d79 | 142 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); |
1053d35f | 143 | |
1053d35f | 144 | /* De-alloc array of command/tx buffers */ |
961ba60a | 145 | for (i = 0; i < TFD_TX_CMD_SLOTS; i++) |
da99c4b6 | 146 | kfree(txq->cmd[i]); |
1053d35f RR |
147 | |
148 | /* De-alloc circular buffer of TFDs */ | |
149 | if (txq->q.n_bd) | |
a8e74e27 | 150 | pci_free_consistent(dev, priv->hw_params.tfd_size * |
499b1883 | 151 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); |
1053d35f RR |
152 | |
153 | /* De-alloc array of per-TFD driver data */ | |
154 | kfree(txq->txb); | |
155 | txq->txb = NULL; | |
156 | ||
c2acea8e JB |
157 | /* deallocate arrays */ |
158 | kfree(txq->cmd); | |
159 | kfree(txq->meta); | |
160 | txq->cmd = NULL; | |
161 | txq->meta = NULL; | |
162 | ||
1053d35f RR |
163 | /* 0-fill queue descriptor structure */ |
164 | memset(txq, 0, sizeof(*txq)); | |
165 | } | |
a8e74e27 | 166 | EXPORT_SYMBOL(iwl_tx_queue_free); |
961ba60a TW |
167 | |
168 | /** | |
169 | * iwl_cmd_queue_free - Deallocate DMA queue. | |
170 | * @txq: Transmit queue to deallocate. | |
171 | * | |
172 | * Empty queue by removing and destroying all BD's. | |
173 | * Free all buffers. | |
174 | * 0-fill, but do not free "txq" descriptor structure. | |
175 | */ | |
3e5d238f | 176 | void iwl_cmd_queue_free(struct iwl_priv *priv) |
961ba60a TW |
177 | { |
178 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | |
179 | struct iwl_queue *q = &txq->q; | |
180 | struct pci_dev *dev = priv->pci_dev; | |
71c55d90 | 181 | int i; |
961ba60a TW |
182 | |
183 | if (q->n_bd == 0) | |
184 | return; | |
185 | ||
961ba60a TW |
186 | /* De-alloc array of command/tx buffers */ |
187 | for (i = 0; i <= TFD_CMD_SLOTS; i++) | |
188 | kfree(txq->cmd[i]); | |
189 | ||
190 | /* De-alloc circular buffer of TFDs */ | |
191 | if (txq->q.n_bd) | |
3e5d238f | 192 | pci_free_consistent(dev, priv->hw_params.tfd_size * |
499b1883 | 193 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); |
961ba60a | 194 | |
28142986 RC |
195 | /* deallocate arrays */ |
196 | kfree(txq->cmd); | |
197 | kfree(txq->meta); | |
198 | txq->cmd = NULL; | |
199 | txq->meta = NULL; | |
200 | ||
961ba60a TW |
201 | /* 0-fill queue descriptor structure */ |
202 | memset(txq, 0, sizeof(*txq)); | |
203 | } | |
3e5d238f AK |
204 | EXPORT_SYMBOL(iwl_cmd_queue_free); |
205 | ||
fd4abac5 TW |
206 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** |
207 | * DMA services | |
208 | * | |
209 | * Theory of operation | |
210 | * | |
211 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer | |
212 | * of buffer descriptors, each of which points to one or more data buffers for | |
213 | * the device to read from or fill. Driver and device exchange status of each | |
214 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty | |
215 | * entries in each circular buffer, to protect against confusing empty and full | |
216 | * queue states. | |
217 | * | |
218 | * The device reads or writes the data in the queues via the device's several | |
219 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | |
220 | * | |
221 | * For Tx queues, there are low mark and high mark limits. If, after queuing | |
222 | * a packet for Tx, the free space drops below the low mark, the Tx queue is | |
223 | * stopped. When reclaiming packets (on the 'tx done' IRQ), once free space | |
224 | * rises above the high mark, the Tx queue is resumed. | |
225 | * | |
226 | * See more detailed info in iwl-4965-hw.h. | |
227 | ***************************************************/ | |
228 | ||
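/**
 * iwl_queue_space - Return number of TFD entries still free in the queue
 *
 * Two entries are always held back so that a completely full queue can be
 * told apart from an empty one (see "Theory of operation" above).
 */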
229 | int iwl_queue_space(const struct iwl_queue *q) | |
230 | { | |
231 | int s = q->read_ptr - q->write_ptr; | |
232 | ||
233 | if (q->read_ptr > q->write_ptr) | |
234 | s -= q->n_bd; | |
235 | ||
236 | if (s <= 0) | |
237 | s += q->n_window; | |
238 | /* keep some reserve to not confuse empty and full situations */ | |
239 | s -= 2; | |
240 | if (s < 0) | |
241 | s = 0; | |
242 | return s; | |
243 | } | |
244 | EXPORT_SYMBOL(iwl_queue_space); | |
245 | ||
246 | ||
1053d35f RR |
247 | /** |
248 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | |
249 | */ | |
443cfd45 | 250 | static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, |
1053d35f RR |
251 | int count, int slots_num, u32 id) |
252 | { | |
253 | q->n_bd = count; | |
254 | q->n_window = slots_num; | |
255 | q->id = id; | |
256 | ||
257 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | |
258 | * and iwl_queue_dec_wrap are broken. */ | |
259 | BUG_ON(!is_power_of_2(count)); | |
260 | ||
261 | /* slots_num must be power-of-two size, otherwise | |
262 | * get_cmd_index is broken. */ | |
263 | BUG_ON(!is_power_of_2(slots_num)); | |
264 | ||
265 | q->low_mark = q->n_window / 4; | |
266 | if (q->low_mark < 4) | |
267 | q->low_mark = 4; | |
268 | ||
269 | q->high_mark = q->n_window / 8; | |
270 | if (q->high_mark < 2) | |
271 | q->high_mark = 2; | |
272 | ||
273 | q->write_ptr = q->read_ptr = 0; | |
274 | ||
275 | return 0; | |
276 | } | |
277 | ||
278 | /** | |
279 | * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue | |
280 | */ | |
281 | static int iwl_tx_queue_alloc(struct iwl_priv *priv, | |
16466903 | 282 | struct iwl_tx_queue *txq, u32 id) |
1053d35f RR |
283 | { |
284 | struct pci_dev *dev = priv->pci_dev; | |
3978e5bc | 285 | size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; |
1053d35f RR |
286 | |
287 | /* Driver private data, only for Tx (not command) queues, | |
288 | * not shared with device. */ | |
289 | if (id != IWL_CMD_QUEUE_NUM) { | |
290 | txq->txb = kmalloc(sizeof(txq->txb[0]) * | |
291 | TFD_QUEUE_SIZE_MAX, GFP_KERNEL); | |
292 | if (!txq->txb) { | |
15b1687c | 293 | IWL_ERR(priv, "kmalloc for auxiliary BD " |
1053d35f RR |
294 | "structures failed\n"); |
295 | goto error; | |
296 | } | |
3978e5bc | 297 | } else { |
1053d35f | 298 | txq->txb = NULL; |
3978e5bc | 299 | } |
1053d35f RR |
300 | |
301 | /* Circular buffer of transmit frame descriptors (TFDs), | |
302 | * shared with device */ | |
3978e5bc | 303 | txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr); |
1053d35f | 304 | |
499b1883 | 305 | if (!txq->tfds) { |
3978e5bc | 306 | IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); |
1053d35f RR |
307 | goto error; |
308 | } | |
309 | txq->q.id = id; | |
310 | ||
311 | return 0; | |
312 | ||
313 | error: | |
314 | kfree(txq->txb); | |
315 | txq->txb = NULL; | |
316 | ||
317 | return -ENOMEM; | |
318 | } | |
319 | ||
1053d35f RR |
320 | /** |
321 | * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue | |
322 | */ | |
a8e74e27 SO |
323 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
324 | int slots_num, u32 txq_id) | |
1053d35f | 325 | { |
da99c4b6 | 326 | int i, len; |
73b7d742 | 327 | int ret; |
c2acea8e | 328 | int actual_slots = slots_num; |
1053d35f RR |
329 | |
330 | /* | |
331 | * Alloc buffer array for commands (Tx or other types of commands). | |
332 | * For the command queue (#4), allocate command space + one big | |
333 | * command for scan, since the scan command is very large; the system will | |
334 | * not have two scans at the same time, so only one is needed. | |
335 | * For normal Tx queues (all other queues), no super-size command | |
336 | * space is needed. | |
337 | */ | |
c2acea8e JB |
338 | if (txq_id == IWL_CMD_QUEUE_NUM) |
339 | actual_slots++; | |
340 | ||
341 | txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, | |
342 | GFP_KERNEL); | |
343 | txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, | |
344 | GFP_KERNEL); | |
345 | ||
346 | if (!txq->meta || !txq->cmd) | |
347 | goto out_free_arrays; | |
348 | ||
349 | len = sizeof(struct iwl_device_cmd); | |
350 | for (i = 0; i < actual_slots; i++) { | |
351 | /* only happens for cmd queue */ | |
352 | if (i == slots_num) | |
353 | len += IWL_MAX_SCAN_SIZE; | |
da99c4b6 | 354 | |
49898852 | 355 | txq->cmd[i] = kmalloc(len, GFP_KERNEL); |
da99c4b6 | 356 | if (!txq->cmd[i]) |
73b7d742 | 357 | goto err; |
da99c4b6 | 358 | } |
1053d35f RR |
359 | |
360 | /* Alloc driver data array and TFD circular buffer */ | |
73b7d742 TW |
361 | ret = iwl_tx_queue_alloc(priv, txq, txq_id); |
362 | if (ret) | |
363 | goto err; | |
1053d35f | 364 | |
1053d35f RR |
365 | txq->need_update = 0; |
366 | ||
45af8195 JB |
367 | /* aggregation TX queues will get their ID when aggregation begins */ |
368 | if (txq_id <= IWL_TX_FIFO_AC3) | |
369 | txq->swq_id = txq_id; | |
370 | ||
1053d35f RR |
371 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise |
372 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | |
373 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | |
374 | ||
375 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | |
376 | iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); | |
377 | ||
378 | /* Tell device where to find queue */ | |
a8e74e27 | 379 | priv->cfg->ops->lib->txq_init(priv, txq); |
1053d35f RR |
380 | |
381 | return 0; | |
73b7d742 | 382 | err: |
c2acea8e | 383 | for (i = 0; i < actual_slots; i++) |
73b7d742 | 384 | kfree(txq->cmd[i]); |
c2acea8e JB |
385 | out_free_arrays: |
386 | kfree(txq->meta); | |
387 | kfree(txq->cmd); | |
73b7d742 | 388 | |
73b7d742 | 389 | return -ENOMEM; |
1053d35f | 390 | } |
a8e74e27 SO |
391 | EXPORT_SYMBOL(iwl_tx_queue_init); |
392 | ||
da1bc453 TW |
393 | /** |
394 | * iwl_hw_txq_ctx_free - Free TXQ Context | |
395 | * | |
396 | * Destroy all TX DMA queues and structures | |
397 | */ | |
398 | void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |
399 | { | |
400 | int txq_id; | |
401 | ||
402 | /* Tx queues */ | |
88804e2b WYG |
403 | if (priv->txq) |
404 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; | |
405 | txq_id++) | |
406 | if (txq_id == IWL_CMD_QUEUE_NUM) | |
407 | iwl_cmd_queue_free(priv); | |
408 | else | |
409 | iwl_tx_queue_free(priv, txq_id); | |
4ddbb7d0 TW |
410 | iwl_free_dma_ptr(priv, &priv->kw); |
411 | ||
412 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); | |
88804e2b WYG |
413 | |
414 | /* free tx queue structure */ | |
415 | iwl_free_txq_mem(priv); | |
da1bc453 TW |
416 | } |
417 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); | |
418 | ||
1053d35f RR |
419 | /** |
420 | * iwl_txq_ctx_reset - Reset TX queue context | |
a96a27f9 | 421 | * Destroys all DMA structures and initializes them again
1053d35f RR |
422 | * |
423 | * @param priv | |
424 | * @return error code | |
425 | */ | |
426 | int iwl_txq_ctx_reset(struct iwl_priv *priv) | |
427 | { | |
428 | int ret = 0; | |
429 | int txq_id, slots_num; | |
da1bc453 | 430 | unsigned long flags; |
1053d35f | 431 | |
1053d35f RR |
432 | /* Free all tx/cmd queues and keep-warm buffer */ |
433 | iwl_hw_txq_ctx_free(priv); | |
434 | ||
4ddbb7d0 TW |
435 | ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls, |
436 | priv->hw_params.scd_bc_tbls_size); | |
437 | if (ret) { | |
15b1687c | 438 | IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); |
4ddbb7d0 TW |
439 | goto error_bc_tbls; |
440 | } | |
1053d35f | 441 | /* Alloc keep-warm buffer */ |
4ddbb7d0 | 442 | ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); |
1053d35f | 443 | if (ret) { |
15b1687c | 444 | IWL_ERR(priv, "Keep Warm allocation failed\n"); |
1053d35f RR |
445 | goto error_kw; |
446 | } | |
88804e2b WYG |
447 | |
448 | /* allocate tx queue structure */ | |
449 | ret = iwl_alloc_txq_mem(priv); | |
450 | if (ret) | |
451 | goto error; | |
452 | ||
da1bc453 | 453 | spin_lock_irqsave(&priv->lock, flags); |
1053d35f RR |
454 | |
455 | /* Turn off all Tx DMA fifos */ | |
da1bc453 TW |
456 | priv->cfg->ops->lib->txq_set_sched(priv, 0); |
457 | ||
4ddbb7d0 TW |
458 | /* Tell NIC where to find the "keep warm" buffer */ |
459 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | |
460 | ||
da1bc453 TW |
461 | spin_unlock_irqrestore(&priv->lock, flags); |
462 | ||
da1bc453 | 463 | /* Alloc and init all Tx queues, including the command queue (#4) */ |
1053d35f RR |
464 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { |
465 | slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? | |
466 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | |
467 | ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, | |
468 | txq_id); | |
469 | if (ret) { | |
15b1687c | 470 | IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); |
1053d35f RR |
471 | goto error; |
472 | } | |
473 | } | |
474 | ||
475 | return ret; | |
476 | ||
477 | error: | |
478 | iwl_hw_txq_ctx_free(priv); | |
4ddbb7d0 | 479 | iwl_free_dma_ptr(priv, &priv->kw); |
1053d35f | 480 | error_kw: |
4ddbb7d0 TW |
481 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); |
482 | error_bc_tbls: | |
1053d35f RR |
483 | return ret; |
484 | } | |
a33c2f47 | 485 | |
da1bc453 TW |
486 | /** |
487 | * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory | |
488 | */ | |
489 | void iwl_txq_ctx_stop(struct iwl_priv *priv) | |
490 | { | |
f3f911d1 | 491 | int ch; |
da1bc453 TW |
492 | unsigned long flags; |
493 | ||
da1bc453 TW |
494 | /* Turn off all Tx DMA fifos */ |
495 | spin_lock_irqsave(&priv->lock, flags); | |
da1bc453 TW |
496 | |
497 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | |
498 | ||
499 | /* Stop each Tx DMA channel, and wait for it to be idle */ | |
f3f911d1 ZY |
500 | for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { |
501 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | |
da1bc453 | 502 | iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, |
f3f911d1 | 503 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), |
f056658b | 504 | 1000); |
da1bc453 | 505 | } |
da1bc453 TW |
506 | spin_unlock_irqrestore(&priv->lock, flags); |
507 | ||
508 | /* Deallocate memory for all Tx queues */ | |
509 | iwl_hw_txq_ctx_free(priv); | |
510 | } | |
511 | EXPORT_SYMBOL(iwl_txq_ctx_stop); | |
fd4abac5 TW |
512 | |
513 | /* | |
514 | * Build the basic fields of the REPLY_TX command (flags, station, TID, timeouts). | |
515 | */ | |
516 | static void iwl_tx_cmd_build_basic(struct iwl_priv *priv, | |
517 | struct iwl_tx_cmd *tx_cmd, | |
e039fa4a | 518 | struct ieee80211_tx_info *info, |
fd4abac5 | 519 | struct ieee80211_hdr *hdr, |
0e7690f1 | 520 | u8 std_id) |
fd4abac5 | 521 | { |
fd7c8a40 | 522 | __le16 fc = hdr->frame_control; |
fd4abac5 TW |
523 | __le32 tx_flags = tx_cmd->tx_flags; |
524 | ||
525 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | |
e039fa4a | 526 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { |
fd4abac5 | 527 | tx_flags |= TX_CMD_FLG_ACK_MSK; |
fd7c8a40 | 528 | if (ieee80211_is_mgmt(fc)) |
fd4abac5 | 529 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; |
fd7c8a40 | 530 | if (ieee80211_is_probe_resp(fc) && |
fd4abac5 TW |
531 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) |
532 | tx_flags |= TX_CMD_FLG_TSF_MSK; | |
533 | } else { | |
534 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | |
535 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | |
536 | } | |
537 | ||
fd7c8a40 | 538 | if (ieee80211_is_back_req(fc)) |
fd4abac5 TW |
539 | tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; |
540 | ||
541 | ||
542 | tx_cmd->sta_id = std_id; | |
8b7b1e05 | 543 | if (ieee80211_has_morefrags(fc)) |
fd4abac5 TW |
544 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; |
545 | ||
fd7c8a40 HH |
546 | if (ieee80211_is_data_qos(fc)) { |
547 | u8 *qc = ieee80211_get_qos_ctl(hdr); | |
fd4abac5 TW |
548 | tx_cmd->tid_tspec = qc[0] & 0xf; |
549 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | |
550 | } else { | |
551 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | |
552 | } | |
553 | ||
a326a5d0 | 554 | priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); |
fd4abac5 TW |
555 | |
556 | if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) | |
557 | tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; | |
558 | ||
559 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | |
fd7c8a40 HH |
560 | if (ieee80211_is_mgmt(fc)) { |
561 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | |
fd4abac5 TW |
562 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); |
563 | else | |
564 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | |
565 | } else { | |
566 | tx_cmd->timeout.pm_frame_timeout = 0; | |
567 | } | |
568 | ||
569 | tx_cmd->driver_txop = 0; | |
570 | tx_cmd->tx_flags = tx_flags; | |
571 | tx_cmd->next_frame_len = 0; | |
572 | } | |
573 | ||
574 | #define RTS_HCCA_RETRY_LIMIT 3 | |
575 | #define RTS_DFAULT_RETRY_LIMIT 60 | |
576 | ||
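/*
 * Fill in retry limits and, for non-data frames, the legacy rate and antenna
 * to use.  Data frames are rate-selected by the uCode station table instead
 * (TX_CMD_FLG_STA_RATE_MSK).
 */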
577 | static void iwl_tx_cmd_build_rate(struct iwl_priv *priv, | |
578 | struct iwl_tx_cmd *tx_cmd, | |
e039fa4a | 579 | struct ieee80211_tx_info *info, |
b58ef214 | 580 | __le16 fc, int is_hcca) |
fd4abac5 | 581 | { |
b58ef214 | 582 | u32 rate_flags; |
76eff18b | 583 | int rate_idx; |
b58ef214 DH |
584 | u8 rts_retry_limit; |
585 | u8 data_retry_limit; | |
fd4abac5 | 586 | u8 rate_plcp; |
2e92e6f2 | 587 | |
b58ef214 | 588 | /* Set retry limit on DATA packets and Probe Responses */
1f0436f4 | 589 | if (ieee80211_is_probe_resp(fc)) |
b58ef214 DH |
590 | data_retry_limit = 3; |
591 | else | |
592 | data_retry_limit = IWL_DEFAULT_TX_RETRY; | |
593 | tx_cmd->data_retry_limit = data_retry_limit; | |
fd4abac5 | 594 | |
b58ef214 DH |
595 | /* Set retry limit on RTS packets */ |
596 | rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT : | |
597 | RTS_DFAULT_RETRY_LIMIT; | |
598 | if (data_retry_limit < rts_retry_limit) | |
599 | rts_retry_limit = data_retry_limit; | |
600 | tx_cmd->rts_retry_limit = rts_retry_limit; | |
fd4abac5 | 601 | |
b58ef214 DH |
602 | /* DATA packets will use the uCode station table for rate/antenna |
603 | * selection */ | |
fd4abac5 TW |
604 | if (ieee80211_is_data(fc)) { |
605 | tx_cmd->initial_rate_index = 0; | |
606 | tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | |
b58ef214 DH |
607 | return; |
608 | } | |
609 | ||
610 | /** | |
611 | * If the current TX rate stored in mac80211 has the MCS bit set, it's | |
612 | * not really a TX rate. Thus, we use the lowest supported rate for | |
613 | * this band. Also use the lowest supported rate if the stored rate | |
614 | * index is invalid. | |
615 | */ | |
616 | rate_idx = info->control.rates[0].idx; | |
617 | if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || | |
618 | (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) | |
619 | rate_idx = rate_lowest_index(&priv->bands[info->band], | |
620 | info->control.sta); | |
621 | /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ | |
622 | if (info->band == IEEE80211_BAND_5GHZ) | |
623 | rate_idx += IWL_FIRST_OFDM_RATE; | |
624 | /* Get PLCP rate for tx_cmd->rate_n_flags */ | |
625 | rate_plcp = iwl_rates[rate_idx].plcp; | |
626 | /* Zero out flags for this packet */ | |
627 | rate_flags = 0; | |
fd4abac5 | 628 | |
b58ef214 DH |
629 | /* Set CCK flag as needed */ |
630 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) | |
631 | rate_flags |= RATE_MCS_CCK_MSK; | |
632 | ||
633 | /* Set up RTS and CTS flags for certain packets */ | |
634 | switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { | |
635 | case cpu_to_le16(IEEE80211_STYPE_AUTH): | |
636 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): | |
637 | case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): | |
638 | case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): | |
639 | if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { | |
640 | tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; | |
641 | tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; | |
642 | } | |
643 | break; | |
644 | default: | |
645 | break; | |
fd4abac5 TW |
646 | } |
647 | ||
b58ef214 DH |
648 | /* Set up antennas */ |
649 | priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); | |
650 | rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); | |
651 | ||
652 | /* Set the rate in the TX cmd */ | |
e7d326ac | 653 | tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); |
fd4abac5 TW |
654 | } |
655 | ||
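/*
 * Fill the Tx command's security fields from the mac80211 hardware key:
 * CCMP and TKIP copy in their key material, WEP additionally records the
 * key index and the 104-bit key flag.
 */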
656 | static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv, | |
e039fa4a | 657 | struct ieee80211_tx_info *info, |
fd4abac5 TW |
658 | struct iwl_tx_cmd *tx_cmd, |
659 | struct sk_buff *skb_frag, | |
660 | int sta_id) | |
661 | { | |
e039fa4a | 662 | struct ieee80211_key_conf *keyconf = info->control.hw_key; |
fd4abac5 | 663 | |
ccc038ab | 664 | switch (keyconf->alg) { |
fd4abac5 TW |
665 | case ALG_CCMP: |
666 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | |
ccc038ab | 667 | memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); |
e039fa4a | 668 | if (info->flags & IEEE80211_TX_CTL_AMPDU) |
fd4abac5 | 669 | tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; |
e1623446 | 670 | IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); |
fd4abac5 TW |
671 | break; |
672 | ||
673 | case ALG_TKIP: | |
674 | tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; | |
ccc038ab | 675 | ieee80211_get_tkip_key(keyconf, skb_frag, |
fd4abac5 | 676 | IEEE80211_TKIP_P2_KEY, tx_cmd->key); |
e1623446 | 677 | IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); |
fd4abac5 TW |
678 | break; |
679 | ||
680 | case ALG_WEP: | |
fd4abac5 | 681 | tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | |
ccc038ab EG |
682 | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); |
683 | ||
684 | if (keyconf->keylen == WEP_KEY_LEN_128) | |
685 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | |
686 | ||
687 | memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); | |
fd4abac5 | 688 | |
e1623446 | 689 | IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " |
ccc038ab | 690 | "with key %d\n", keyconf->keyidx); |
fd4abac5 TW |
691 | break; |
692 | ||
693 | default: | |
978785a3 | 694 | IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); |
fd4abac5 TW |
695 | break; |
696 | } | |
697 | } | |
698 | ||
fd4abac5 TW |
699 | /* |
700 | * start REPLY_TX command process | |
701 | */ | |
e039fa4a | 702 | int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) |
fd4abac5 TW |
703 | { |
704 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | |
e039fa4a | 705 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
f3674227 TW |
706 | struct iwl_tx_queue *txq; |
707 | struct iwl_queue *q; | |
c2acea8e JB |
708 | struct iwl_device_cmd *out_cmd; |
709 | struct iwl_cmd_meta *out_meta; | |
f3674227 TW |
710 | struct iwl_tx_cmd *tx_cmd; |
711 | int swq_id, txq_id; | |
fd4abac5 TW |
712 | dma_addr_t phys_addr; |
713 | dma_addr_t txcmd_phys; | |
714 | dma_addr_t scratch_phys; | |
be1a71a1 | 715 | u16 len, len_org, firstlen, secondlen; |
fd4abac5 | 716 | u16 seq_number = 0; |
fd7c8a40 | 717 | __le16 fc; |
0e7690f1 | 718 | u8 hdr_len; |
f3674227 | 719 | u8 sta_id; |
fd4abac5 TW |
720 | u8 wait_write_ptr = 0; |
721 | u8 tid = 0; | |
722 | u8 *qc = NULL; | |
723 | unsigned long flags; | |
724 | int ret; | |
725 | ||
726 | spin_lock_irqsave(&priv->lock, flags); | |
727 | if (iwl_is_rfkill(priv)) { | |
e1623446 | 728 | IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); |
fd4abac5 TW |
729 | goto drop_unlock; |
730 | } | |
731 | ||
fd7c8a40 | 732 | fc = hdr->frame_control; |
fd4abac5 TW |
733 | |
734 | #ifdef CONFIG_IWLWIFI_DEBUG | |
735 | if (ieee80211_is_auth(fc)) | |
e1623446 | 736 | IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); |
fd7c8a40 | 737 | else if (ieee80211_is_assoc_req(fc)) |
e1623446 | 738 | IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); |
fd7c8a40 | 739 | else if (ieee80211_is_reassoc_req(fc)) |
e1623446 | 740 | IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); |
fd4abac5 TW |
741 | #endif |
742 | ||
aa065263 | 743 | /* drop all non-injected data frames if we are not associated */
fd7c8a40 | 744 | if (ieee80211_is_data(fc) && |
aa065263 | 745 | !(info->flags & IEEE80211_TX_CTL_INJECTED) && |
d10c4ec8 | 746 | (!iwl_is_associated(priv) || |
05c914fe | 747 | ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) || |
d10c4ec8 | 748 | !priv->assoc_station_added)) { |
e1623446 | 749 | IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n"); |
fd4abac5 TW |
750 | goto drop_unlock; |
751 | } | |
752 | ||
7294ec95 | 753 | hdr_len = ieee80211_hdrlen(fc); |
fd4abac5 TW |
754 | |
755 | /* Find (or create) index into station table for destination station */ | |
aa065263 GS |
756 | if (info->flags & IEEE80211_TX_CTL_INJECTED) |
757 | sta_id = priv->hw_params.bcast_sta_id; | |
758 | else | |
759 | sta_id = iwl_get_sta_id(priv, hdr); | |
fd4abac5 | 760 | if (sta_id == IWL_INVALID_STATION) { |
e1623446 | 761 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", |
e174961c | 762 | hdr->addr1); |
3995bd93 | 763 | goto drop_unlock; |
fd4abac5 TW |
764 | } |
765 | ||
e1623446 | 766 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); |
fd4abac5 | 767 | |
45af8195 | 768 | txq_id = skb_get_queue_mapping(skb); |
fd7c8a40 HH |
769 | if (ieee80211_is_data_qos(fc)) { |
770 | qc = ieee80211_get_qos_ctl(hdr); | |
7294ec95 | 771 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; |
e6a6cf4c RC |
772 | if (unlikely(tid >= MAX_TID_COUNT)) |
773 | goto drop_unlock; | |
f3674227 TW |
774 | seq_number = priv->stations[sta_id].tid[tid].seq_number; |
775 | seq_number &= IEEE80211_SCTL_SEQ; | |
776 | hdr->seq_ctrl = hdr->seq_ctrl & | |
c1b4aa3f | 777 | cpu_to_le16(IEEE80211_SCTL_FRAG); |
f3674227 | 778 | hdr->seq_ctrl |= cpu_to_le16(seq_number); |
fd4abac5 | 779 | seq_number += 0x10; |
fd4abac5 | 780 | /* aggregation is on for this <sta,tid> */ |
45af8195 | 781 | if (info->flags & IEEE80211_TX_CTL_AMPDU) |
fd4abac5 | 782 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; |
fd4abac5 TW |
783 | } |
784 | ||
fd4abac5 | 785 | txq = &priv->txq[txq_id]; |
45af8195 | 786 | swq_id = txq->swq_id; |
fd4abac5 TW |
787 | q = &txq->q; |
788 | ||
3995bd93 JB |
789 | if (unlikely(iwl_queue_space(q) < q->high_mark)) |
790 | goto drop_unlock; | |
791 | ||
792 | if (ieee80211_is_data_qos(fc)) | |
793 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | |
fd4abac5 | 794 | |
fd4abac5 TW |
795 | /* Set up driver data for this TFD */ |
796 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | |
797 | txq->txb[q->write_ptr].skb[0] = skb; | |
fd4abac5 TW |
798 | |
799 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | |
b88b15df | 800 | out_cmd = txq->cmd[q->write_ptr]; |
c2acea8e | 801 | out_meta = &txq->meta[q->write_ptr]; |
fd4abac5 TW |
802 | tx_cmd = &out_cmd->cmd.tx; |
803 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | |
804 | memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); | |
805 | ||
806 | /* | |
807 | * Set up the Tx-command (not MAC!) header. | |
808 | * Store the chosen Tx queue and TFD index within the sequence field; | |
809 | * after Tx, uCode's Tx response will return this value so driver can | |
810 | * locate the frame within the tx queue and do post-tx processing. | |
811 | */ | |
812 | out_cmd->hdr.cmd = REPLY_TX; | |
813 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | |
814 | INDEX_TO_SEQ(q->write_ptr))); | |
815 | ||
816 | /* Copy MAC header from skb into command buffer */ | |
817 | memcpy(tx_cmd->hdr, hdr, hdr_len); | |
818 | ||
df833b1d RC |
819 | |
820 | /* Total # bytes to be transmitted */ | |
821 | len = (u16)skb->len; | |
822 | tx_cmd->len = cpu_to_le16(len); | |
823 | ||
824 | if (info->control.hw_key) | |
825 | iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); | |
826 | ||
827 | /* TODO need this for burst mode later on */ | |
828 | iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); | |
20594eb0 | 829 | iwl_dbg_log_tx_data_frame(priv, len, hdr); |
df833b1d RC |
830 | |
831 | /* set is_hcca to 0; it probably will never be implemented */ | |
b58ef214 | 832 | iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0); |
df833b1d | 833 | |
22fdf3c9 | 834 | iwl_update_stats(priv, true, fc, len); |
fd4abac5 TW |
835 | /* |
836 | * Use the first empty entry in this queue's command buffer array | |
837 | * to contain the Tx command and MAC header concatenated together | |
838 | * (payload data will be in another buffer). | |
839 | * Size of this varies, due to varying MAC header length. | |
840 | * If end is not dword aligned, we'll have 2 extra bytes at the end | |
841 | * of the MAC header (device reads on dword boundaries). | |
842 | * We'll tell device about this padding later. | |
843 | */ | |
844 | len = sizeof(struct iwl_tx_cmd) + | |
845 | sizeof(struct iwl_cmd_header) + hdr_len; | |
846 | ||
847 | len_org = len; | |
be1a71a1 | 848 | firstlen = len = (len + 3) & ~3; |
fd4abac5 TW |
849 | |
850 | if (len_org != len) | |
851 | len_org = 1; | |
852 | else | |
853 | len_org = 0; | |
854 | ||
df833b1d RC |
855 | /* Tell NIC about any 2-byte padding after MAC header */ |
856 | if (len_org) | |
857 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | |
858 | ||
fd4abac5 TW |
859 | /* Physical address of this Tx command's header (not MAC header!), |
860 | * within command buffer array. */ | |
499b1883 | 861 | txcmd_phys = pci_map_single(priv->pci_dev, |
df833b1d | 862 | &out_cmd->hdr, len, |
96891cee | 863 | PCI_DMA_BIDIRECTIONAL); |
c2acea8e JB |
864 | pci_unmap_addr_set(out_meta, mapping, txcmd_phys); |
865 | pci_unmap_len_set(out_meta, len, len); | |
fd4abac5 TW |
866 | /* Add buffer containing Tx command and MAC(!) header to TFD's |
867 | * first entry */ | |
7aaa1d79 SO |
868 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, |
869 | txcmd_phys, len, 1, 0); | |
fd4abac5 | 870 | |
df833b1d RC |
871 | if (!ieee80211_has_morefrags(hdr->frame_control)) { |
872 | txq->need_update = 1; | |
873 | if (qc) | |
874 | priv->stations[sta_id].tid[tid].seq_number = seq_number; | |
875 | } else { | |
876 | wait_write_ptr = 1; | |
877 | txq->need_update = 0; | |
878 | } | |
fd4abac5 TW |
879 | |
880 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | |
881 | * if any (802.11 null frames have no payload). */ | |
be1a71a1 | 882 | secondlen = len = skb->len - hdr_len; |
fd4abac5 TW |
883 | if (len) { |
884 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | |
885 | len, PCI_DMA_TODEVICE); | |
7aaa1d79 SO |
886 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, |
887 | phys_addr, len, | |
888 | 0, 0); | |
fd4abac5 TW |
889 | } |
890 | ||
fd4abac5 | 891 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + |
df833b1d RC |
892 | offsetof(struct iwl_tx_cmd, scratch); |
893 | ||
894 | len = sizeof(struct iwl_tx_cmd) + | |
895 | sizeof(struct iwl_cmd_header) + hdr_len; | |
896 | /* take back ownership of DMA buffer to enable update */ | |
897 | pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, | |
898 | len, PCI_DMA_BIDIRECTIONAL); | |
fd4abac5 | 899 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); |
499b1883 | 900 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); |
fd4abac5 | 901 | |
d2ee9cd2 RC |
902 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", |
903 | le16_to_cpu(out_cmd->hdr.sequence)); | |
904 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); | |
3d816c77 RC |
905 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); |
906 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | |
fd4abac5 TW |
907 | |
908 | /* Set up entry for this TFD in Tx byte-count array */ | |
7b80ece4 RC |
909 | if (info->flags & IEEE80211_TX_CTL_AMPDU) |
910 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, | |
df833b1d RC |
911 | le16_to_cpu(tx_cmd->len)); |
912 | ||
913 | pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, | |
914 | len, PCI_DMA_BIDIRECTIONAL); | |
fd4abac5 | 915 | |
be1a71a1 JB |
916 | trace_iwlwifi_dev_tx(priv, |
917 | &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], | |
918 | sizeof(struct iwl_tfd), | |
919 | &out_cmd->hdr, firstlen, | |
920 | skb->data + hdr_len, secondlen); | |
921 | ||
fd4abac5 TW |
922 | /* Tell device the write index *just past* this latest filled TFD */ |
923 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | |
924 | ret = iwl_txq_update_write_ptr(priv, txq); | |
925 | spin_unlock_irqrestore(&priv->lock, flags); | |
926 | ||
927 | if (ret) | |
928 | return ret; | |
929 | ||
143b09ef | 930 | if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { |
fd4abac5 TW |
931 | if (wait_write_ptr) { |
932 | spin_lock_irqsave(&priv->lock, flags); | |
933 | txq->need_update = 1; | |
934 | iwl_txq_update_write_ptr(priv, txq); | |
935 | spin_unlock_irqrestore(&priv->lock, flags); | |
143b09ef | 936 | } else { |
e4e72fb4 | 937 | iwl_stop_queue(priv, txq->swq_id); |
fd4abac5 | 938 | } |
fd4abac5 TW |
939 | } |
940 | ||
941 | return 0; | |
942 | ||
943 | drop_unlock: | |
944 | spin_unlock_irqrestore(&priv->lock, flags); | |
fd4abac5 TW |
945 | return -1; |
946 | } | |
947 | EXPORT_SYMBOL(iwl_tx_skb); | |
948 | ||
949 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | |
950 | ||
951 | /** | |
952 | * iwl_enqueue_hcmd - enqueue a uCode command | |
953 | * @priv: device private data pointer | |
954 | * @cmd: a pointer to the uCode command structure | |
955 | * | |
956 | * The function returns < 0 values to indicate the operation | |
957 | * failed. On success, it returns the index (> 0) of the command in the | |
958 | * command queue. | |
959 | */ | |
960 | int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |
961 | { | |
962 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | |
963 | struct iwl_queue *q = &txq->q; | |
c2acea8e JB |
964 | struct iwl_device_cmd *out_cmd; |
965 | struct iwl_cmd_meta *out_meta; | |
fd4abac5 | 966 | dma_addr_t phys_addr; |
fd4abac5 | 967 | unsigned long flags; |
f3674227 TW |
968 | int len, ret; |
969 | u32 idx; | |
970 | u16 fix_size; | |
fd4abac5 TW |
971 | |
972 | cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); | |
973 | fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); | |
974 | ||
975 | /* If any of the command structures end up being larger than | |
976 | * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then | |
977 | * we will need to increase the size of the TFD entries */ | |
978 | BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && | |
c2acea8e | 979 | !(cmd->flags & CMD_SIZE_HUGE)); |
fd4abac5 | 980 | |
7812b167 | 981 | if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { |
f2f21b49 RC |
982 | IWL_WARN(priv, "Not sending command - %s KILL\n", |
983 | iwl_is_rfkill(priv) ? "RF" : "CT"); | |
fd4abac5 TW |
984 | return -EIO; |
985 | } | |
986 | ||
c2acea8e | 987 | if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { |
15b1687c | 988 | IWL_ERR(priv, "No space for Tx\n"); |
7812b167 WYG |
989 | if (iwl_within_ct_kill_margin(priv)) |
990 | iwl_tt_enter_ct_kill(priv); | |
991 | else { | |
992 | IWL_ERR(priv, "Restarting adapter due to queue full\n"); | |
993 | queue_work(priv->workqueue, &priv->restart); | |
994 | } | |
fd4abac5 TW |
995 | return -ENOSPC; |
996 | } | |
997 | ||
998 | spin_lock_irqsave(&priv->hcmd_lock, flags); | |
999 | ||
c2acea8e | 1000 | idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); |
da99c4b6 | 1001 | out_cmd = txq->cmd[idx]; |
c2acea8e JB |
1002 | out_meta = &txq->meta[idx]; |
1003 | ||
8ce73f3a | 1004 | memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ |
c2acea8e JB |
1005 | out_meta->flags = cmd->flags; |
1006 | if (cmd->flags & CMD_WANT_SKB) | |
1007 | out_meta->source = cmd; | |
1008 | if (cmd->flags & CMD_ASYNC) | |
1009 | out_meta->callback = cmd->callback; | |
fd4abac5 TW |
1010 | |
1011 | out_cmd->hdr.cmd = cmd->id; | |
fd4abac5 TW |
1012 | memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); |
1013 | ||
1014 | /* At this point, the out_cmd now has all of the incoming cmd | |
1015 | * information */ | |
1016 | ||
1017 | out_cmd->hdr.flags = 0; | |
1018 | out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | | |
1019 | INDEX_TO_SEQ(q->write_ptr)); | |
c2acea8e | 1020 | if (cmd->flags & CMD_SIZE_HUGE) |
9734cb23 | 1021 | out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; |
c2acea8e | 1022 | len = sizeof(struct iwl_device_cmd); |
df833b1d | 1023 | len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0; |
499b1883 | 1024 | |
fd4abac5 | 1025 | |
ded2ae7c EK |
1026 | #ifdef CONFIG_IWLWIFI_DEBUG |
1027 | switch (out_cmd->hdr.cmd) { | |
1028 | case REPLY_TX_LINK_QUALITY_CMD: | |
1029 | case SENSITIVITY_CMD: | |
e1623446 | 1030 | IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, " |
ded2ae7c EK |
1031 | "%d bytes at %d[%d]:%d\n", |
1032 | get_cmd_string(out_cmd->hdr.cmd), | |
1033 | out_cmd->hdr.cmd, | |
1034 | le16_to_cpu(out_cmd->hdr.sequence), fix_size, | |
1035 | q->write_ptr, idx, IWL_CMD_QUEUE_NUM); | |
1036 | break; | |
1037 | default: | |
e1623446 | 1038 | IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " |
ded2ae7c EK |
1039 | "%d bytes at %d[%d]:%d\n", |
1040 | get_cmd_string(out_cmd->hdr.cmd), | |
1041 | out_cmd->hdr.cmd, | |
1042 | le16_to_cpu(out_cmd->hdr.sequence), fix_size, | |
1043 | q->write_ptr, idx, IWL_CMD_QUEUE_NUM); | |
1044 | } | |
1045 | #endif | |
fd4abac5 TW |
1046 | txq->need_update = 1; |
1047 | ||
518099a8 SO |
1048 | if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl) |
1049 | /* Set up entry in queue's byte count circular buffer */ | |
1050 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); | |
fd4abac5 | 1051 | |
df833b1d RC |
1052 | phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, |
1053 | fix_size, PCI_DMA_BIDIRECTIONAL); | |
c2acea8e JB |
1054 | pci_unmap_addr_set(out_meta, mapping, phys_addr); |
1055 | pci_unmap_len_set(out_meta, len, fix_size); | |
df833b1d | 1056 | |
be1a71a1 JB |
1057 | trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags); |
1058 | ||
df833b1d RC |
1059 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, |
1060 | phys_addr, fix_size, 1, | |
1061 | U32_PAD(cmd->len)); | |
1062 | ||
fd4abac5 TW |
1063 | /* Increment and update queue's write index */ |
1064 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | |
1065 | ret = iwl_txq_update_write_ptr(priv, txq); | |
1066 | ||
1067 | spin_unlock_irqrestore(&priv->hcmd_lock, flags); | |
1068 | return ret ? ret : idx; | |
1069 | } | |
1070 | ||
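/**
 * iwl_tx_queue_reclaim - Reclaim Tx queue entries already sent to the device
 *
 * Walks entries from the queue's read pointer through @index, reporting each
 * frame's Tx status to mac80211 and freeing its TFD; returns the number of
 * entries freed.
 */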
17b88929 TW |
1071 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) |
1072 | { | |
1073 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | |
1074 | struct iwl_queue *q = &txq->q; | |
1075 | struct iwl_tx_info *tx_info; | |
1076 | int nfreed = 0; | |
1077 | ||
1078 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { | |
15b1687c | 1079 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " |
17b88929 TW |
1080 | "is out of range [0-%d] %d %d.\n", txq_id, |
1081 | index, q->n_bd, q->write_ptr, q->read_ptr); | |
1082 | return 0; | |
1083 | } | |
1084 | ||
499b1883 TW |
1085 | for (index = iwl_queue_inc_wrap(index, q->n_bd); |
1086 | q->read_ptr != index; | |
1087 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | |
17b88929 TW |
1088 | |
1089 | tx_info = &txq->txb[txq->q.read_ptr]; | |
1090 | ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); | |
1091 | tx_info->skb[0] = NULL; | |
17b88929 | 1092 | |
972cf447 TW |
1093 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) |
1094 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); | |
1095 | ||
7aaa1d79 | 1096 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); |
17b88929 TW |
1097 | nfreed++; |
1098 | } | |
1099 | return nfreed; | |
1100 | } | |
1101 | EXPORT_SYMBOL(iwl_tx_queue_reclaim); | |
1102 | ||
1103 | ||
1104 | /** | |
1105 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | |
1106 | * | |
1107 | * When FW advances 'R' index, all entries between old and new 'R' index | |
1108 | * need to be reclaimed. As a result, some free space forms. If there is | |
1109 | * enough free space (> low mark), wake the stack that feeds us. | |
1110 | */ | |
499b1883 TW |
1111 | static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, |
1112 | int idx, int cmd_idx) | |
17b88929 TW |
1113 | { |
1114 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | |
1115 | struct iwl_queue *q = &txq->q; | |
1116 | int nfreed = 0; | |
1117 | ||
499b1883 | 1118 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { |
15b1687c | 1119 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " |
17b88929 | 1120 | "is out of range [0-%d] %d %d.\n", txq_id, |
499b1883 | 1121 | idx, q->n_bd, q->write_ptr, q->read_ptr); |
17b88929 TW |
1122 | return; |
1123 | } | |
1124 | ||
499b1883 TW |
1125 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; |
1126 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | |
17b88929 | 1127 | |
499b1883 | 1128 | if (nfreed++ > 0) { |
15b1687c | 1129 | IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, |
17b88929 TW |
1130 | q->write_ptr, q->read_ptr); |
1131 | queue_work(priv->workqueue, &priv->restart); | |
1132 | } | |
da99c4b6 | 1133 | |
17b88929 TW |
1134 | } |
1135 | } | |
1136 | ||
1137 | /** | |
1138 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | |
1139 | * @rxb: Rx buffer to reclaim | |
1140 | * | |
1141 | * If an Rx buffer has an async callback associated with it the callback | |
1142 | * will be executed. The attached skb (if present) will only be freed | |
1143 | * if the callback returns 1 | |
1144 | */ | |
1145 | void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |
1146 | { | |
2f301227 | 1147 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
17b88929 TW |
1148 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
1149 | int txq_id = SEQ_TO_QUEUE(sequence); | |
1150 | int index = SEQ_TO_INDEX(sequence); | |
17b88929 | 1151 | int cmd_index; |
9734cb23 | 1152 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); |
c2acea8e JB |
1153 | struct iwl_device_cmd *cmd; |
1154 | struct iwl_cmd_meta *meta; | |
17b88929 TW |
1155 | |
1156 | /* If a Tx command is being handled and it isn't in the actual | |
1157 | * command queue, then a command routing bug has been introduced | |
1158 | * in the queue management code. */ | |
55d6a3cd | 1159 | if (WARN(txq_id != IWL_CMD_QUEUE_NUM, |
01ef9323 WT |
1160 | "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n", |
1161 | txq_id, sequence, | |
1162 | priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr, | |
1163 | priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) { | |
ec741164 | 1164 | iwl_print_hex_error(priv, pkt, 32); |
55d6a3cd | 1165 | return; |
01ef9323 | 1166 | } |
17b88929 TW |
1167 | |
1168 | cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | |
da99c4b6 | 1169 | cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; |
c2acea8e | 1170 | meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; |
17b88929 | 1171 | |
c33de625 RC |
1172 | pci_unmap_single(priv->pci_dev, |
1173 | pci_unmap_addr(meta, mapping), | |
1174 | pci_unmap_len(meta, len), | |
1175 | PCI_DMA_BIDIRECTIONAL); | |
1176 | ||
17b88929 | 1177 | /* Input error checking is done when commands are added to queue. */ |
c2acea8e | 1178 | if (meta->flags & CMD_WANT_SKB) { |
2f301227 ZY |
1179 | meta->source->reply_page = (unsigned long)rxb_addr(rxb); |
1180 | rxb->page = NULL; | |
5696aea6 | 1181 | } else if (meta->callback) |
2f301227 | 1182 | meta->callback(priv, cmd, pkt); |
17b88929 | 1183 | |
499b1883 | 1184 | iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); |
17b88929 | 1185 | |
c2acea8e | 1186 | if (!(meta->flags & CMD_ASYNC)) { |
17b88929 TW |
1187 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); |
1188 | wake_up_interruptible(&priv->wait_command_queue); | |
1189 | } | |
1190 | } | |
1191 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | |
1192 | ||
30e553e3 TW |
1193 | /* |
1194 | * Find first available (lowest unused) Tx Queue, mark it "active". | |
1195 | * Called only when finding queue for aggregation. | |
1196 | * Should never return anything < 7, because they should already | |
1197 | * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6). | |
1198 | */ | |
1199 | static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) | |
1200 | { | |
1201 | int txq_id; | |
1202 | ||
1203 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | |
1204 | if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) | |
1205 | return txq_id; | |
1206 | return -1; | |
1207 | } | |
1208 | ||
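/*
 * Start a Tx aggregation session for <ra, tid>: reserve a free HW queue,
 * note the starting sequence number and enable the queue for aggregation.
 * mac80211 is notified immediately if the HW queue is already empty,
 * otherwise once the pending frames have drained.
 */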
1209 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | |
1210 | { | |
1211 | int sta_id; | |
1212 | int tx_fifo; | |
1213 | int txq_id; | |
1214 | int ret; | |
1215 | unsigned long flags; | |
1216 | struct iwl_tid_data *tid_data; | |
30e553e3 TW |
1217 | |
1218 | if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) | |
1219 | tx_fifo = default_tid_to_tx_fifo[tid]; | |
1220 | else | |
1221 | return -EINVAL; | |
1222 | ||
39aadf8c | 1223 | IWL_WARN(priv, "%s on ra = %pM tid = %d\n", |
e174961c | 1224 | __func__, ra, tid); |
30e553e3 TW |
1225 | |
1226 | sta_id = iwl_find_station(priv, ra); | |
3eb92969 WYG |
1227 | if (sta_id == IWL_INVALID_STATION) { |
1228 | IWL_ERR(priv, "Start AGG on invalid station\n"); | |
30e553e3 | 1229 | return -ENXIO; |
3eb92969 | 1230 | } |
082e708a RK |
1231 | if (unlikely(tid >= MAX_TID_COUNT)) |
1232 | return -EINVAL; | |
30e553e3 TW |
1233 | |
1234 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | |
15b1687c | 1235 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); |
30e553e3 TW |
1236 | return -ENXIO; |
1237 | } | |
1238 | ||
1239 | txq_id = iwl_txq_ctx_activate_free(priv); | |
3eb92969 WYG |
1240 | if (txq_id == -1) { |
1241 | IWL_ERR(priv, "No free aggregation queue available\n"); | |
30e553e3 | 1242 | return -ENXIO; |
3eb92969 | 1243 | } |
30e553e3 TW |
1244 | |
1245 | spin_lock_irqsave(&priv->sta_lock, flags); | |
1246 | tid_data = &priv->stations[sta_id].tid[tid]; | |
1247 | *ssn = SEQ_TO_SN(tid_data->seq_number); | |
1248 | tid_data->agg.txq_id = txq_id; | |
45af8195 | 1249 | priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id); |
30e553e3 TW |
1250 | spin_unlock_irqrestore(&priv->sta_lock, flags); |
1251 | ||
1252 | ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, | |
1253 | sta_id, tid, *ssn); | |
1254 | if (ret) | |
1255 | return ret; | |
1256 | ||
1257 | if (tid_data->tfds_in_queue == 0) { | |
3eb92969 | 1258 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); |
30e553e3 TW |
1259 | tid_data->agg.state = IWL_AGG_ON; |
1260 | ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid); | |
1261 | } else { | |
e1623446 | 1262 | IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n", |
30e553e3 TW |
1263 | tid_data->tfds_in_queue); |
1264 | tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | |
1265 | } | |
1266 | return ret; | |
1267 | } | |
1268 | EXPORT_SYMBOL(iwl_tx_agg_start); | |
1269 | ||
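/*
 * Tear down the aggregation session for <ra, tid>.  If the HW queue still
 * holds frames, the teardown is deferred (IWL_EMPTYING_HW_QUEUE_DELBA) and
 * completed later from iwl_txq_check_empty().
 */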
1270 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid) | |
1271 | { | |
1272 | int tx_fifo_id, txq_id, sta_id, ssn = -1; | |
1273 | struct iwl_tid_data *tid_data; | |
1274 | int ret, write_ptr, read_ptr; | |
1275 | unsigned long flags; | |
30e553e3 TW |
1276 | |
1277 | if (!ra) { | |
15b1687c | 1278 | IWL_ERR(priv, "ra = NULL\n"); |
30e553e3 TW |
1279 | return -EINVAL; |
1280 | } | |
1281 | ||
e6a6cf4c RC |
1282 | if (unlikely(tid >= MAX_TID_COUNT)) |
1283 | return -EINVAL; | |
1284 | ||
30e553e3 TW |
1285 | if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) |
1286 | tx_fifo_id = default_tid_to_tx_fifo[tid]; | |
1287 | else | |
1288 | return -EINVAL; | |
1289 | ||
1290 | sta_id = iwl_find_station(priv, ra); | |
1291 | ||
a2f1cbeb WYG |
1292 | if (sta_id == IWL_INVALID_STATION) { |
1293 | IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); | |
30e553e3 | 1294 | return -ENXIO; |
a2f1cbeb | 1295 | } |
30e553e3 TW |
1296 | |
1297 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) | |
39aadf8c | 1298 | IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n"); |
30e553e3 TW |
1299 | |
1300 | tid_data = &priv->stations[sta_id].tid[tid]; | |
1301 | ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | |
1302 | txq_id = tid_data->agg.txq_id; | |
1303 | write_ptr = priv->txq[txq_id].q.write_ptr; | |
1304 | read_ptr = priv->txq[txq_id].q.read_ptr; | |
1305 | ||
1306 | /* The queue is not empty */ | |
1307 | if (write_ptr != read_ptr) { | |
e1623446 | 1308 | IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); |
30e553e3 TW |
1309 | priv->stations[sta_id].tid[tid].agg.state = |
1310 | IWL_EMPTYING_HW_QUEUE_DELBA; | |
1311 | return 0; | |
1312 | } | |
1313 | ||
e1623446 | 1314 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); |
30e553e3 TW |
1315 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; |
1316 | ||
1317 | spin_lock_irqsave(&priv->lock, flags); | |
1318 | ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, | |
1319 | tx_fifo_id); | |
1320 | spin_unlock_irqrestore(&priv->lock, flags); | |
1321 | ||
1322 | if (ret) | |
1323 | return ret; | |
1324 | ||
1325 | ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid); | |
1326 | ||
1327 | return 0; | |
1328 | } | |
1329 | EXPORT_SYMBOL(iwl_tx_agg_stop); | |
1330 | ||
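/*
 * Called as frames are reclaimed: once the HW queue has drained, complete a
 * deferred ADDBA or DELBA state transition for this <sta, tid>.
 */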
1331 | int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id) | |
1332 | { | |
1333 | struct iwl_queue *q = &priv->txq[txq_id].q; | |
1334 | u8 *addr = priv->stations[sta_id].sta.sta.addr; | |
1335 | struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; | |
1336 | ||
1337 | switch (priv->stations[sta_id].tid[tid].agg.state) { | |
1338 | case IWL_EMPTYING_HW_QUEUE_DELBA: | |
1339 | /* We are reclaiming the last packet of the */ | |
1340 | /* aggregated HW queue */ | |
3fd07a1e TW |
1341 | if ((txq_id == tid_data->agg.txq_id) && |
1342 | (q->read_ptr == q->write_ptr)) { | |
30e553e3 TW |
1343 | u16 ssn = SEQ_TO_SN(tid_data->seq_number); |
1344 | int tx_fifo = default_tid_to_tx_fifo[tid]; | |
e1623446 | 1345 | IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); |
30e553e3 TW |
1346 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, |
1347 | ssn, tx_fifo); | |
1348 | tid_data->agg.state = IWL_AGG_OFF; | |
1349 | ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid); | |
1350 | } | |
1351 | break; | |
1352 | case IWL_EMPTYING_HW_QUEUE_ADDBA: | |
1353 | /* We are reclaiming the last packet of the queue */ | |
1354 | if (tid_data->tfds_in_queue == 0) { | |
e1623446 | 1355 | IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); |
30e553e3 TW |
1356 | tid_data->agg.state = IWL_AGG_ON; |
1357 | ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid); | |
1358 | } | |
1359 | break; | |
1360 | } | |
1361 | return 0; | |
1362 | } | |
1363 | EXPORT_SYMBOL(iwl_txq_check_empty); | |
30e553e3 | 1364 | |
653fa4a0 EG |
1365 | /** |
1366 | * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack | |
1367 | * | |
1368 | * Go through block-ack's bitmap of ACK'd frames, update driver's record of | |
1369 | * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. | |
1370 | */ | |
1371 | static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv, | |
1372 | struct iwl_ht_agg *agg, | |
1373 | struct iwl_compressed_ba_resp *ba_resp) | |
1374 | ||
1375 | { | |
1376 | int i, sh, ack; | |
1377 | u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | |
1378 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | |
1379 | u64 bitmap; | |
1380 | int successes = 0; | |
1381 | struct ieee80211_tx_info *info; | |
1382 | ||
1383 | if (unlikely(!agg->wait_for_ba)) { | |
15b1687c | 1384 | IWL_ERR(priv, "Received BA when not expected\n"); |
653fa4a0 EG |
1385 | return -EINVAL; |
1386 | } | |
1387 | ||
1388 | /* Mark that the expected block-ack response arrived */ | |
1389 | agg->wait_for_ba = 0; | |
e1623446 | 1390 | IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); |
653fa4a0 EG |
1391 | |
1392 | /* Calculate shift to align block-ack bits with our Tx window bits */ | |
3fd07a1e | 1393 | sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); |
653fa4a0 EG |
1394 | if (sh < 0) /* something is wrong with the indices */ | |
1395 | sh += 0x100; | |
1396 | ||
1397 | /* don't use 64-bit values for now */ | |
1398 | bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | |
1399 | ||
1400 | if (agg->frame_count > (64 - sh)) { | |
e1623446 | 1401 | IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); |
653fa4a0 EG |
1402 | return -1; |
1403 | } | |
1404 | ||
1405 | /* check for success or failure according to the | |
1406 | * transmitted bitmap and block-ack bitmap */ | |
1407 | bitmap &= agg->bitmap; | |
1408 | ||
1409 | /* For each frame attempted in aggregation, | |
1410 | * update driver's record of tx frame's status. */ | |
1411 | for (i = 0; i < agg->frame_count ; i++) { | |
4aa41f12 | 1412 | ack = bitmap & (1ULL << i); |
653fa4a0 | 1413 | successes += !!ack; |
e1623446 | 1414 | IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", |
c3056065 | 1415 | ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, |
653fa4a0 EG |
1416 | agg->start_idx + i); |
1417 | } | |
1418 | ||
1419 | info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); | |
1420 | memset(&info->status, 0, sizeof(info->status)); | |
91a55ae6 | 1421 | info->flags |= IEEE80211_TX_STAT_ACK; |
653fa4a0 EG |
1422 | info->flags |= IEEE80211_TX_STAT_AMPDU; |
1423 | info->status.ampdu_ack_map = successes; | |
1424 | info->status.ampdu_ack_len = agg->frame_count; | |
1425 | iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info); | |
1426 | ||
e1623446 | 1427 | IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); |
653fa4a0 EG |
1428 | |
1429 | return 0; | |
1430 | } | |
1431 | ||
1432 | /** | |
1433 | * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | |
1434 | * | |
1435 | * Handles block-acknowledge notification from device, which reports success | |
1436 | * of frames sent via aggregation. | |
1437 | */ | |
1438 | void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | |
1439 | struct iwl_rx_mem_buffer *rxb) | |
1440 | { | |
2f301227 | 1441 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
653fa4a0 | 1442 | struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; |
653fa4a0 EG |
1443 | struct iwl_tx_queue *txq = NULL; |
1444 | struct iwl_ht_agg *agg; | |
3fd07a1e TW |
1445 | int index; |
1446 | int sta_id; | |
1447 | int tid; | |
653fa4a0 EG |
1448 | |
1449 | /* "flow" corresponds to Tx queue */ | |
1450 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | |
1451 | ||
1452 | /* "ssn" is start of block-ack Tx window, corresponds to index | |
1453 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | |
1454 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | |
1455 | ||
1456 | if (scd_flow >= priv->hw_params.max_txq_num) { | |
15b1687c WT |
1457 | IWL_ERR(priv, |
1458 | "BUG_ON scd_flow is bigger than number of queues\n"); | |
653fa4a0 EG |
1459 | return; |
1460 | } | |
1461 | ||
1462 | txq = &priv->txq[scd_flow]; | |
3fd07a1e TW |
1463 | sta_id = ba_resp->sta_id; |
1464 | tid = ba_resp->tid; | |
1465 | agg = &priv->stations[sta_id].tid[tid].agg; | |
653fa4a0 EG |
1466 | |
1467 | /* Find index just before block-ack window */ | |
1468 | index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | |
1469 | ||
1470 | /* TODO: Need to get this copy more safely - now good for debug */ | |
1471 | ||
e1623446 | 1472 | IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " |
653fa4a0 EG |
1473 | "sta_id = %d\n", |
1474 | agg->wait_for_ba, | |
e174961c | 1475 | (u8 *) &ba_resp->sta_addr_lo32, |
653fa4a0 | 1476 | ba_resp->sta_id); |
e1623446 | 1477 | IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " |
653fa4a0 EG |
1478 | "%d, scd_ssn = %d\n", |
1479 | ba_resp->tid, | |
1480 | ba_resp->seq_ctl, | |
1481 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | |
1482 | ba_resp->scd_flow, | |
1483 | ba_resp->scd_ssn); | |
e1623446 | 1484 | IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n", |
653fa4a0 EG |
1485 | agg->start_idx, |
1486 | (unsigned long long)agg->bitmap); | |
1487 | ||
1488 | /* Update driver's record of ACK vs. not for each frame in window */ | |
1489 | iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp); | |
1490 | ||
1491 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | |
1492 | * block-ack window (we assume that they've been successfully | |
1493 | * transmitted ... if not, it's too late anyway). */ | |
1494 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | |
1495 | /* calculate mac80211 ampdu sw queue to wake */ | |
653fa4a0 | 1496 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); |
3fd07a1e TW |
1497 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; |
1498 | ||
1499 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && | |
1500 | priv->mac80211_registered && | |
1501 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) | |
e4e72fb4 | 1502 | iwl_wake_queue(priv, txq->swq_id); |
3fd07a1e TW |
1503 | |
1504 | iwl_txq_check_empty(priv, sta_id, tid, scd_flow); | |
653fa4a0 EG |
1505 | } |
1506 | } | |
1507 | EXPORT_SYMBOL(iwl_rx_reply_compressed_ba); | |
1508 | ||
994d31f7 | 1509 | #ifdef CONFIG_IWLWIFI_DEBUG |
a332f8d6 TW |
1510 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x |
1511 | ||
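/* Translate a REPLY_TX status code into a printable name for debug output. */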
1512 | const char *iwl_get_tx_fail_reason(u32 status) | |
1513 | { | |
1514 | switch (status & TX_STATUS_MSK) { | |
1515 | case TX_STATUS_SUCCESS: | |
1516 | return "SUCCESS"; | |
1517 | TX_STATUS_ENTRY(SHORT_LIMIT); | |
1518 | TX_STATUS_ENTRY(LONG_LIMIT); | |
1519 | TX_STATUS_ENTRY(FIFO_UNDERRUN); | |
1520 | TX_STATUS_ENTRY(MGMNT_ABORT); | |
1521 | TX_STATUS_ENTRY(NEXT_FRAG); | |
1522 | TX_STATUS_ENTRY(LIFE_EXPIRE); | |
1523 | TX_STATUS_ENTRY(DEST_PS); | |
1524 | TX_STATUS_ENTRY(ABORTED); | |
1525 | TX_STATUS_ENTRY(BT_RETRY); | |
1526 | TX_STATUS_ENTRY(STA_INVALID); | |
1527 | TX_STATUS_ENTRY(FRAG_DROPPED); | |
1528 | TX_STATUS_ENTRY(TID_DISABLE); | |
1529 | TX_STATUS_ENTRY(FRAME_FLUSHED); | |
1530 | TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); | |
1531 | TX_STATUS_ENTRY(TX_LOCKED); | |
1532 | TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); | |
1533 | } | |
1534 | ||
1535 | return "UNKNOWN"; | |
1536 | } | |
1537 | EXPORT_SYMBOL(iwl_get_tx_fail_reason); | |
1538 | #endif /* CONFIG_IWLWIFI_DEBUG */ |