/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
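
/*
 * Note: the table above is indexed by TID. The first eight entries follow
 * the 802.11e/WMM user-priority to access-category grouping (TIDs 1-2 share
 * one FIFO, TIDs 0 and 3 another, TIDs 4-5 and 6-7 the remaining two), and
 * TIDs 8-15 map to no FIFO. The final entry presumably serves the driver's
 * pseudo-TID used for non-QoS frames.
 */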

static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
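
/*
 * Note: zeroing the descriptor above makes iwl_free_dma_ptr() safe to call
 * more than once on the same iwl_dma_ptr; the error-unwind path in
 * iwl_txq_ctx_reset() relies on that.
 */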

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);


/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq_id: Index of the transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA command queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* De-alloc array of command/tx buffers */
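	/* The command queue allocates one extra slot for the oversized
	 * scan command (see iwl_tx_queue_init), hence the inclusive
	 * upper bound in the loop below. */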
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the "tx done" IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
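
/*
 * Worked example for the arithmetic above: with n_bd = 256 and n_window = 64,
 * read_ptr = 8 and write_ptr = 10 give s = -2; since s <= 0 it becomes
 * -2 + 64 = 62, and after subtracting the 2-entry reserve 60 is returned.
 */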


/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;
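	/* Note: despite the names, high_mark ends up smaller than low_mark.
	 * Tx paths stop a queue once free space drops below high_mark
	 * (see iwl_tx_skb), and reclaim paths wake it once free space
	 * exceeds low_mark (see iwl_rx_reply_compressed_ba). */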

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);

	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len += IWL_MAX_SCAN_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* aggregation TX queues will get their ID when aggregation begins */
	if (txq_id <= IWL_TX_FIFO_AC3)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

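/* Error unwinding: iwl_hw_txq_ctx_free() already releases the keep-warm
 * buffer and the scheduler tables, but since iwl_free_dma_ptr() zeroes the
 * descriptor after freeing, the explicit frees below are harmless no-ops
 * for anything already released. */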
 error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Build the basic part of the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;


	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int is_hcca)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;
	else if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
			  RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up RTS and CTS flags for certain packets */
	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
		}
		break;
	default:
		break;
	}

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
				       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

/*
 * Start the REPLY_TX command process.
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all non-injected data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	if (info->flags & IEEE80211_TX_CTL_INJECTED)
		sta_id = priv->hw_params.bcast_sta_id;
	else
		sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	txq_id = skb_get_queue_mapping(skb);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop_unlock;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		goto drop_unlock;

	if (ieee80211_is_data_qos(fc))
		priv->stations[sta_id].tid[tid].tfds_in_queue++;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
	      sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;
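
	/* From here on, len_org is reused as a flag: 1 if the command plus
	 * MAC header needed 2 bytes of dword padding, 0 otherwise. */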

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
	      sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
							     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

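	/* Queue is nearly full: either push the write pointer now (more
	 * fragments of this frame are coming) or stop the mac80211 queue
	 * until enough entries have been reclaimed. */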
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);


/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
			 pci_unmap_addr(&txq->meta[cmd_idx], mapping),
			 pci_unmap_len(&txq->meta[cmd_idx], len),
			 PCI_DMA_BIDIRECTIONAL);

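	/* Normally exactly one entry is reclaimed here; freeing more than
	 * one suggests the driver's and uCode's views of the command queue
	 * have diverged, so a firmware restart is scheduled below. */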
	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		 txq_id, sequence,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_skb = rxb->skb;
		rxb->skb = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, rxb->skb);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
		 __func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* something is wrong with indices */
		sh += 0x100;
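	/* SEQ_TO_INDEX() maps sequence numbers onto the 256-entry TFD index
	 * space, so a negative shift above indicates a wrap; adding 0x100
	 * re-normalizes it. */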

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count ; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */