iwlagn: introduce iwl-shared.h
drivers/net/wireless/iwlwifi/iwl-trans.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63 #include "iwl-dev.h"
64 #include "iwl-trans.h"
65 #include "iwl-core.h"
66 #include "iwl-helpers.h"
67 #include "iwl-trans-int-pcie.h"
68 /* TODO: remove unneeded includes once the transport layer tx_free lives here */
69 #include "iwl-agn.h"
70 #include "iwl-core.h"
71 #include "iwl-shared.h"
72
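/*
 * iwl_trans_rx_alloc - allocate the coherent DMA memory backing the Rx queue:
 * the circular buffer of Read Buffer Descriptors (RBDs) and the rb_stts area
 * the device writes its Rx status into. The receive pages themselves are
 * attached later by the replenish path.
 */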
73 static int iwl_trans_rx_alloc(struct iwl_priv *priv)
74 {
75 struct iwl_rx_queue *rxq = &priv->rxq;
76 struct device *dev = priv->bus->dev;
77
78 memset(&priv->rxq, 0, sizeof(priv->rxq));
79
80 spin_lock_init(&rxq->lock);
81 INIT_LIST_HEAD(&rxq->rx_free);
82 INIT_LIST_HEAD(&rxq->rx_used);
83
84 if (WARN_ON(rxq->bd || rxq->rb_stts))
85 return -EINVAL;
86
87 /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
88 rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
89 &rxq->bd_dma, GFP_KERNEL);
90 if (!rxq->bd)
91 goto err_bd;
92 memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
93
94 /*Allocate the driver's pointer to receive buffer status */
95 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
96 &rxq->rb_stts_dma, GFP_KERNEL);
97 if (!rxq->rb_stts)
98 goto err_rb_stts;
99 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
100
101 return 0;
102
103 err_rb_stts:
104 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
105 rxq->bd, rxq->bd_dma);
106 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
107 rxq->bd = NULL;
108 err_bd:
109 return -ENOMEM;
110 }
111
112 static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
113 {
114 struct iwl_rx_queue *rxq = &priv->rxq;
115 int i;
116
117 /* Fill the rx_used queue with _all_ of the Rx buffers */
118 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
119 /* In the reset function, these buffers may have been allocated
120 * to an SKB, so we need to unmap and free potential storage */
121 if (rxq->pool[i].page != NULL) {
122 dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
123 PAGE_SIZE << priv->hw_params.rx_page_order,
124 DMA_FROM_DEVICE);
125 __iwl_free_pages(priv, rxq->pool[i].page);
126 rxq->pool[i].page = NULL;
127 }
128 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
129 }
130 }
131
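/*
 * iwl_trans_rx_hw_init - hand the Rx queue to the hardware: stop Rx DMA,
 * program the RBD base and status write-back addresses into the FH
 * registers, pick 4K or 8K receive buffers and set the interrupt
 * coalescing timer.
 */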
132 static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
133 struct iwl_rx_queue *rxq)
134 {
135 u32 rb_size;
136 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
137 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
138
139 rb_timeout = RX_RB_TIMEOUT;
140
141 if (iwlagn_mod_params.amsdu_size_8K)
142 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
143 else
144 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
145
146 /* Stop Rx DMA */
147 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
148
149 /* Reset driver's Rx queue write index */
150 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
151
152 /* Tell device where to find RBD circular buffer in DRAM */
153 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
154 (u32)(rxq->bd_dma >> 8));
155
156 /* Tell device where in DRAM to update its Rx status */
157 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
158 rxq->rb_stts_dma >> 4);
159
160 /* Enable Rx DMA
161 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
162 * the credit mechanism in 5000 HW RX FIFO
163 * Direct rx interrupts to hosts
164 * Rx buffer size 4 or 8k
165 * RB timeout 0x10
166 * 256 RBDs
167 */
168 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
169 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
170 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
171 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
172 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
173 rb_size|
174 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
175 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
176
177 /* Set interrupt coalescing timer to default (2048 usecs) */
178 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
179 }
180
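/*
 * iwl_rx_init - (re)initialize the Rx queue: allocate it on first use,
 * return every buffer to rx_used, replenish with fresh pages and then
 * configure the hardware via iwl_trans_rx_hw_init().
 */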
181 static int iwl_rx_init(struct iwl_priv *priv)
182 {
183 struct iwl_rx_queue *rxq = &priv->rxq;
184 int i, err;
185 unsigned long flags;
186
187 if (!rxq->bd) {
188 err = iwl_trans_rx_alloc(priv);
189 if (err)
190 return err;
191 }
192
193 spin_lock_irqsave(&rxq->lock, flags);
194 INIT_LIST_HEAD(&rxq->rx_free);
195 INIT_LIST_HEAD(&rxq->rx_used);
196
197 iwl_trans_rxq_free_rx_bufs(priv);
198
199 for (i = 0; i < RX_QUEUE_SIZE; i++)
200 rxq->queue[i] = NULL;
201
202 /* Set us so that we have processed and used all buffers, but have
203 * not restocked the Rx queue with fresh buffers */
204 rxq->read = rxq->write = 0;
205 rxq->write_actual = 0;
206 rxq->free_count = 0;
207 spin_unlock_irqrestore(&rxq->lock, flags);
208
209 iwlagn_rx_replenish(priv);
210
211 iwl_trans_rx_hw_init(priv, rxq);
212
213 spin_lock_irqsave(&priv->lock, flags);
214 rxq->need_update = 1;
215 iwl_rx_queue_update_write_ptr(priv, rxq);
216 spin_unlock_irqrestore(&priv->lock, flags);
217
218 return 0;
219 }
220
221 static void iwl_trans_rx_free(struct iwl_priv *priv)
222 {
223 struct iwl_rx_queue *rxq = &priv->rxq;
224 unsigned long flags;
225
226 /*if rxq->bd is NULL, it means that nothing has been allocated,
227 * exit now */
228 if (!rxq->bd) {
229 IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
230 return;
231 }
232
233 spin_lock_irqsave(&rxq->lock, flags);
234 iwl_trans_rxq_free_rx_bufs(priv);
235 spin_unlock_irqrestore(&rxq->lock, flags);
236
237 dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
238 rxq->bd, rxq->bd_dma);
239 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
240 rxq->bd = NULL;
241
242 if (rxq->rb_stts)
243 dma_free_coherent(priv->bus->dev,
244 sizeof(struct iwl_rb_status),
245 rxq->rb_stts, rxq->rb_stts_dma);
246 else
247 IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
248 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
249 rxq->rb_stts = NULL;
250 }
251
252 static int iwl_trans_rx_stop(struct iwl_priv *priv)
253 {
254
255 /* stop Rx DMA */
256 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
257 return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
258 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
259 }
260
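/*
 * Helpers for the keep-warm buffer and the scheduler byte-count tables:
 * a struct iwl_dma_ptr couples a coherent DMA allocation with its bus
 * address and size so both can be released in a single call.
 */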
261 static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
262 struct iwl_dma_ptr *ptr, size_t size)
263 {
264 if (WARN_ON(ptr->addr))
265 return -EINVAL;
266
267 ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
268 &ptr->dma, GFP_KERNEL);
269 if (!ptr->addr)
270 return -ENOMEM;
271 ptr->size = size;
272 return 0;
273 }
274
275 static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
276 struct iwl_dma_ptr *ptr)
277 {
278 if (unlikely(!ptr->addr))
279 return;
280
281 dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
282 memset(ptr, 0, sizeof(*ptr));
283 }
284
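/*
 * iwl_trans_txq_alloc - allocate everything one Tx queue needs: the per-slot
 * meta/cmd arrays, the driver-private txb array (data queues only, not the
 * command queue) and the TFD circular buffer shared with the device.
 */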
285 static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
286 int slots_num, u32 txq_id)
287 {
288 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
289 int i;
290
291 if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
292 return -EINVAL;
293
294 txq->q.n_window = slots_num;
295
296 txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
297 GFP_KERNEL);
298 txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
299 GFP_KERNEL);
300
301 if (!txq->meta || !txq->cmd)
302 goto error;
303
304 for (i = 0; i < slots_num; i++) {
305 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
306 GFP_KERNEL);
307 if (!txq->cmd[i])
308 goto error;
309 }
310
311 /* Alloc driver data array and TFD circular buffer */
312 /* Driver private data, only for Tx (not command) queues,
313 * not shared with device. */
314 if (txq_id != priv->cmd_queue) {
315 txq->txb = kzalloc(sizeof(txq->txb[0]) *
316 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
317 if (!txq->txb) {
318 IWL_ERR(priv, "kzalloc for auxiliary BD "
319 "structures failed\n");
320 goto error;
321 }
322 } else {
323 txq->txb = NULL;
324 }
325
326 /* Circular buffer of transmit frame descriptors (TFDs),
327 * shared with device */
328 txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
329 GFP_KERNEL);
330 if (!txq->tfds) {
331 IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
332 goto error;
333 }
334 txq->q.id = txq_id;
335
336 return 0;
337 error:
338 kfree(txq->txb);
339 txq->txb = NULL;
340 /* since txq->cmd has been zeroed,
341 * all non allocated cmd[i] will be NULL */
342 if (txq->cmd)
343 for (i = 0; i < slots_num; i++)
344 kfree(txq->cmd[i]);
345 kfree(txq->meta);
346 kfree(txq->cmd);
347 txq->meta = NULL;
348 txq->cmd = NULL;
349
350 return -ENOMEM;
351
352 }
353
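/*
 * iwl_trans_txq_init - initialize an already allocated Tx queue and tell the
 * device where its TFD circular buffer lives in DRAM.
 */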
354 static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
355 int slots_num, u32 txq_id)
356 {
357 int ret;
358
359 txq->need_update = 0;
360 memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
361
362 /*
363 * For the default queues 0-3, set up the swq_id
364 * already -- all others need to get one later
365 * (if they need one at all).
366 */
367 if (txq_id < 4)
368 iwl_set_swq_id(txq, txq_id, txq_id);
369
370 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
371 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
372 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
373
374 /* Initialize queue's high/low-water marks, and head/tail indexes */
375 ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
376 txq_id);
377 if (ret)
378 return ret;
379
380 /*
381 * Tell nic where to find circular buffer of Tx Frame Descriptors for
382 * given Tx queue, and enable the DMA channel used for that queue.
383 * Circular buffer (TFD queue in DRAM) physical base address */
384 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
385 txq->q.dma_addr >> 8);
386
387 return 0;
388 }
389
390 /**
391 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
392 */
393 static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
394 {
395 struct iwl_tx_queue *txq = &priv->txq[txq_id];
396 struct iwl_queue *q = &txq->q;
397
398 if (!q->n_bd)
399 return;
400
401 while (q->write_ptr != q->read_ptr) {
402 /* The read_ptr needs to be bounded by q->n_window */
403 iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
404 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
405 }
406 }
407
408 /**
409 * iwl_tx_queue_free - Deallocate DMA queue.
410 * @txq: Transmit queue to deallocate.
411 *
412 * Empty queue by removing and destroying all BD's.
413 * Free all buffers.
414 * 0-fill, but do not free "txq" descriptor structure.
415 */
416 static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
417 {
418 struct iwl_tx_queue *txq = &priv->txq[txq_id];
419 struct device *dev = priv->bus->dev;
420 int i;
421 if (WARN_ON(!txq))
422 return;
423
424 iwl_tx_queue_unmap(priv, txq_id);
425
426 /* De-alloc array of command/tx buffers */
427 for (i = 0; i < txq->q.n_window; i++)
428 kfree(txq->cmd[i]);
429
430 /* De-alloc circular buffer of TFDs */
431 if (txq->q.n_bd) {
432 dma_free_coherent(dev, priv->hw_params.tfd_size *
433 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
434 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
435 }
436
437 /* De-alloc array of per-TFD driver data */
438 kfree(txq->txb);
439 txq->txb = NULL;
440
441 /* deallocate arrays */
442 kfree(txq->cmd);
443 kfree(txq->meta);
444 txq->cmd = NULL;
445 txq->meta = NULL;
446
447 /* 0-fill queue descriptor structure */
448 memset(txq, 0, sizeof(*txq));
449 }
450
451 /**
452 * iwl_trans_tx_free - Free TXQ Context
453 *
454 * Destroy all TX DMA queues and structures
455 */
456 static void iwl_trans_tx_free(struct iwl_priv *priv)
457 {
458 int txq_id;
459
460 /* Tx queues */
461 if (priv->txq) {
462 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
463 iwl_tx_queue_free(priv, txq_id);
464 }
465
466 kfree(priv->txq);
467 priv->txq = NULL;
468
469 iwlagn_free_dma_ptr(priv, &priv->kw);
470
471 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
472 }
473
474 /**
475 * iwl_trans_tx_alloc - allocate TX context
476 * Allocate all Tx DMA structures and initialize them
477 *
478 * @param priv
479 * @return error code
480 */
481 static int iwl_trans_tx_alloc(struct iwl_priv *priv)
482 {
483 int ret;
484 int txq_id, slots_num;
485
486 /* It is not allowed to allocate twice, so warn when this happens.
487 * We cannot rely on the previous allocation, so free it and fail */
488 if (WARN_ON(priv->txq)) {
489 ret = -EINVAL;
490 goto error;
491 }
492
493 ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
494 priv->hw_params.scd_bc_tbls_size);
495 if (ret) {
496 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
497 goto error;
498 }
499
500 /* Alloc keep-warm buffer */
501 ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
502 if (ret) {
503 IWL_ERR(priv, "Keep Warm allocation failed\n");
504 goto error;
505 }
506
507 priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
508 priv->cfg->base_params->num_of_queues, GFP_KERNEL);
509 if (!priv->txq) {
510 IWL_ERR(priv, "Not enough memory for txq\n");
511 ret = -ENOMEM;
512 goto error;
513 }
514
515 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
516 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
517 slots_num = (txq_id == priv->cmd_queue) ?
518 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
519 ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
520 txq_id);
521 if (ret) {
522 IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
523 goto error;
524 }
525 }
526
527 return 0;
528
529 error:
530 trans_tx_free(&priv->trans);
531
532 return ret;
533 }
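/*
 * iwl_tx_init - (re)initialize the Tx path: allocate the queues on first use,
 * point the device at the keep-warm buffer and init every queue, including
 * the command queue.
 */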
534 static int iwl_tx_init(struct iwl_priv *priv)
535 {
536 int ret;
537 int txq_id, slots_num;
538 unsigned long flags;
539 bool alloc = false;
540
541 if (!priv->txq) {
542 ret = iwl_trans_tx_alloc(priv);
543 if (ret)
544 goto error;
545 alloc = true;
546 }
547
548 spin_lock_irqsave(&priv->lock, flags);
549
550 /* Turn off all Tx DMA fifos */
551 iwl_write_prph(priv, SCD_TXFACT, 0);
552
553 /* Tell NIC where to find the "keep warm" buffer */
554 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
555
556 spin_unlock_irqrestore(&priv->lock, flags);
557
558 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
559 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
560 slots_num = (txq_id == priv->cmd_queue) ?
561 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
562 ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
563 txq_id);
564 if (ret) {
565 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
566 goto error;
567 }
568 }
569
570 return 0;
571 error:
572 /*Upon error, free only if we allocated something */
573 if (alloc)
574 trans_tx_free(&priv->trans);
575 return ret;
576 }
577
578 static void iwl_set_pwr_vmain(struct iwl_priv *priv)
579 {
580 /*
581 * (for documentation purposes)
582 * to set power to V_AUX, do:
583
584 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
585 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
586 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
587 ~APMG_PS_CTRL_MSK_PWR_SRC);
588 */
589
590 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
591 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
592 ~APMG_PS_CTRL_MSK_PWR_SRC);
593 }
594
595 static int iwl_nic_init(struct iwl_priv *priv)
596 {
597 unsigned long flags;
598
599 /* nic_init */
600 spin_lock_irqsave(&priv->lock, flags);
601 iwl_apm_init(priv);
602
603 /* Set interrupt coalescing calibration timer to default (512 usecs) */
604 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
605
606 spin_unlock_irqrestore(&priv->lock, flags);
607
608 iwl_set_pwr_vmain(priv);
609
610 priv->cfg->lib->nic_config(priv);
611
612 /* Allocate the RX queue, or reset if it is already allocated */
613 iwl_rx_init(priv);
614
615 /* Allocate or reset and init all Tx and Command queues */
616 if (iwl_tx_init(priv))
617 return -ENOMEM;
618
619 if (priv->cfg->base_params->shadow_reg_enable) {
620 /* enable shadow regs in HW */
621 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
622 0x800FFFFF);
623 }
624
625 set_bit(STATUS_INIT, &priv->status);
626
627 return 0;
628 }
629
630 #define HW_READY_TIMEOUT (50)
631
632 /* Note: returns poll_bit return value, which is >= 0 if success */
633 static int iwl_set_hw_ready(struct iwl_priv *priv)
634 {
635 int ret;
636
637 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
638 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
639
640 /* See if we got it */
641 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
642 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
643 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
644 HW_READY_TIMEOUT);
645
646 IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
647 return ret;
648 }
649
650 /* Note: returns standard 0/-ERROR code */
651 static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
652 {
653 int ret;
654
655 IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
656
657 ret = iwl_set_hw_ready(priv);
658 if (ret >= 0)
659 return 0;
660
661 /* If HW is not ready, prepare the conditions to check again */
662 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
663 CSR_HW_IF_CONFIG_REG_PREPARE);
664
665 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
666 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
667 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
668
669 if (ret < 0)
670 return ret;
671
672 /* HW should be ready by now, check again. */
673 ret = iwl_set_hw_ready(priv);
674 if (ret >= 0)
675 return 0;
676 return ret;
677 }
678
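/*
 * iwl_trans_start_device - bring the device up far enough to load uCode:
 * claim uCode ownership, check the RF-kill switch, run iwl_nic_init()
 * (APM, Rx and Tx queues) and enable host interrupts.
 */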
679 static int iwl_trans_start_device(struct iwl_priv *priv)
680 {
681 int ret;
682
683 priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
684
685 if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
686 iwl_trans_prepare_card_hw(priv)) {
687 IWL_WARN(priv, "Exit HW not ready\n");
688 return -EIO;
689 }
690
691 /* If platform's RF_KILL switch is NOT set to KILL */
692 if (iwl_read32(priv, CSR_GP_CNTRL) &
693 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
694 clear_bit(STATUS_RF_KILL_HW, &priv->status);
695 else
696 set_bit(STATUS_RF_KILL_HW, &priv->status);
697
698 if (iwl_is_rfkill(priv)) {
699 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
700 iwl_enable_interrupts(priv);
701 return -ERFKILL;
702 }
703
704 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
705
706 ret = iwl_nic_init(priv);
707 if (ret) {
708 IWL_ERR(priv, "Unable to init nic\n");
709 return ret;
710 }
711
712 /* make sure rfkill handshake bits are cleared */
713 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
714 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
715 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
716
717 /* clear (again), then enable host interrupts */
718 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
719 iwl_enable_interrupts(priv);
720
721 /* really make sure rfkill handshake bits are cleared */
722 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
723 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
724
725 return 0;
726 }
727
728 /*
729 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
730 * must be called with priv->lock held and MAC access granted
731 */
732 static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
733 {
734 iwl_write_prph(priv, SCD_TXFACT, mask);
735 }
736
737 #define IWL_AC_UNSET -1
738
739 struct queue_to_fifo_ac {
740 s8 fifo, ac;
741 };
742
743 static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
744 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
745 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
746 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
747 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
748 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
749 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
750 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
751 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
752 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
753 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
754 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
755 };
756
757 static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
758 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
759 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
760 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
761 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
762 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
763 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
764 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
765 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
766 { IWL_TX_FIFO_BE_IPAN, 2, },
767 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
768 { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
769 };
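/*
 * The two tables above map the default Tx queues (array index == queue
 * number) to hardware FIFOs and mac80211 access categories; entries with
 * IWL_AC_UNSET (e.g. the command queue on IWLAGN_CMD_FIFO_NUM) carry no AC.
 * iwl_trans_tx_start() below picks the IPAN table whenever a context other
 * than the BSS context is valid.
 */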
770 static void iwl_trans_tx_start(struct iwl_priv *priv)
771 {
772 const struct queue_to_fifo_ac *queue_to_fifo;
773 struct iwl_rxon_context *ctx;
774 u32 a;
775 unsigned long flags;
776 int i, chan;
777 u32 reg_val;
778
779 spin_lock_irqsave(&priv->lock, flags);
780
781 priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
782 a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
783 /* reset context data memory */
784 for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
785 a += 4)
786 iwl_write_targ_mem(priv, a, 0);
787 /* reset tx status memory */
788 for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
789 a += 4)
790 iwl_write_targ_mem(priv, a, 0);
791 for (; a < priv->scd_base_addr +
792 SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
793 iwl_write_targ_mem(priv, a, 0);
794
795 iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
796 priv->scd_bc_tbls.dma >> 10);
797
798 /* Enable DMA channel */
799 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
800 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
801 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
802 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
803
804 /* Update FH chicken bits */
805 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
806 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
807 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
808
809 iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
810 SCD_QUEUECHAIN_SEL_ALL(priv));
811 iwl_write_prph(priv, SCD_AGGR_SEL, 0);
812
813 /* initiate the queues */
814 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
815 iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
816 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
817 iwl_write_targ_mem(priv, priv->scd_base_addr +
818 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
819 iwl_write_targ_mem(priv, priv->scd_base_addr +
820 SCD_CONTEXT_QUEUE_OFFSET(i) +
821 sizeof(u32),
822 ((SCD_WIN_SIZE <<
823 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
824 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
825 ((SCD_FRAME_LIMIT <<
826 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
827 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
828 }
829
830 iwl_write_prph(priv, SCD_INTERRUPT_MASK,
831 IWL_MASK(0, priv->hw_params.max_txq_num));
832
833 /* Activate all Tx DMA/FIFO channels */
834 iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
835
836 /* map queues to FIFOs */
837 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
838 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
839 else
840 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
841
842 iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0);
843
844 /* make sure no queues are stopped */
845 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
846 for (i = 0; i < 4; i++)
847 atomic_set(&priv->queue_stop_count[i], 0);
848 for_each_context(priv, ctx)
849 ctx->last_tx_rejected = false;
850
851 /* reset to 0 to enable all queues first */
852 priv->txq_ctx_active_msk = 0;
853
854 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
855 IWLAGN_FIRST_AMPDU_QUEUE);
856 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
857 IWLAGN_FIRST_AMPDU_QUEUE);
858
859 for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
860 int fifo = queue_to_fifo[i].fifo;
861 int ac = queue_to_fifo[i].ac;
862
863 iwl_txq_ctx_activate(priv, i);
864
865 if (fifo == IWL_TX_FIFO_UNUSED)
866 continue;
867
868 if (ac != IWL_AC_UNSET)
869 iwl_set_swq_id(&priv->txq[i], ac, i);
870 iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
871 }
872
873 spin_unlock_irqrestore(&priv->lock, flags);
874
875 /* Enable L1-Active */
876 iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
877 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
878 }
879
880 /**
881 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
882 */
883 static int iwl_trans_tx_stop(struct iwl_priv *priv)
884 {
885 int ch, txq_id;
886 unsigned long flags;
887
888 /* Turn off all Tx DMA fifos */
889 spin_lock_irqsave(&priv->lock, flags);
890
891 iwl_trans_txq_set_sched(priv, 0);
892
893 /* Stop each Tx DMA channel, and wait for it to be idle */
894 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
895 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
896 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
897 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
898 1000))
899 IWL_ERR(priv, "Failing on timeout while stopping"
900 " DMA channel %d [0x%08x]", ch,
901 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
902 }
903 spin_unlock_irqrestore(&priv->lock, flags);
904
905 if (!priv->txq) {
906 IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
907 return 0;
908 }
909
910 /* Unmap DMA from host system and free skb's */
911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
912 iwl_tx_queue_unmap(priv, txq_id);
913
914 return 0;
915 }
916
917 static void iwl_trans_stop_device(struct iwl_priv *priv)
918 {
919 unsigned long flags;
920
921 /* stop and reset the on-board processor */
922 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
923
924 /* tell the device to stop sending interrupts */
925 spin_lock_irqsave(&priv->lock, flags);
926 iwl_disable_interrupts(priv);
927 spin_unlock_irqrestore(&priv->lock, flags);
928 trans_sync_irq(&priv->trans);
929
930 /* device going down, Stop using ICT table */
931 iwl_disable_ict(priv);
932
933 /*
934 * If a HW restart happens during firmware loading,
935 * then the firmware loading might call this function
936 * and later it might be called again due to the
937 * restart. So don't process again if the device is
938 * already dead.
939 */
940 if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
941 iwl_trans_tx_stop(priv);
942 iwl_trans_rx_stop(priv);
943
944 /* Power-down device's busmaster DMA clocks */
945 iwl_write_prph(priv, APMG_CLK_DIS_REG,
946 APMG_CLK_VAL_DMA_CLK_RQT);
947 udelay(5);
948 }
949
950 /* Make sure (redundant) we've released our request to stay awake */
951 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
952
953 /* Stop the device, and put it in low power state */
954 iwl_apm_stop(priv);
955 }
956
957 static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
958 int txq_id)
959 {
960 struct iwl_tx_queue *txq = &priv->txq[txq_id];
961 struct iwl_queue *q = &txq->q;
962 struct iwl_device_cmd *dev_cmd;
963
964 if (unlikely(iwl_queue_space(q) < q->high_mark))
965 return NULL;
966
967 /*
968 * Set up the Tx-command (not MAC!) header.
969 * Store the chosen Tx queue and TFD index within the sequence field;
970 * after Tx, uCode's Tx response will return this value so driver can
971 * locate the frame within the tx queue and do post-tx processing.
972 */
973 dev_cmd = txq->cmd[q->write_ptr];
974 memset(dev_cmd, 0, sizeof(*dev_cmd));
975 dev_cmd->hdr.cmd = REPLY_TX;
976 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
977 INDEX_TO_SEQ(q->write_ptr)));
978 return &dev_cmd->cmd.tx;
979 }
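/*
 * Illustrative sketch, not part of the original file: assuming the usual
 * iwlagn sequence encoding (QUEUE_TO_SEQ() puts the queue number in bits
 * 8..12, INDEX_TO_SEQ() puts the TFD index in bits 0..7), the value stored
 * in hdr.sequence above can be unpacked on the Tx-response path roughly as:
 *
 *	u16 sequence = le16_to_cpu(dev_cmd->hdr.sequence);
 *	int txq_id = SEQ_TO_QUEUE(sequence);
 *	int index = SEQ_TO_INDEX(sequence);
 *
 * which is how the driver finds the frame again for post-Tx processing.
 */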
980
981 static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
982 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
983 struct iwl_rxon_context *ctx)
984 {
985 struct iwl_tx_queue *txq = &priv->txq[txq_id];
986 struct iwl_queue *q = &txq->q;
987 struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
988 struct iwl_cmd_meta *out_meta;
989
990 dma_addr_t phys_addr = 0;
991 dma_addr_t txcmd_phys;
992 dma_addr_t scratch_phys;
993 u16 len, firstlen, secondlen;
994 u8 wait_write_ptr = 0;
995 u8 hdr_len = ieee80211_hdrlen(fc);
996
997 /* Set up driver data for this TFD */
998 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
999 txq->txb[q->write_ptr].skb = skb;
1000 txq->txb[q->write_ptr].ctx = ctx;
1001
1002 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1003 out_meta = &txq->meta[q->write_ptr];
1004
1005 /*
1006 * Use the first empty entry in this queue's command buffer array
1007 * to contain the Tx command and MAC header concatenated together
1008 * (payload data will be in another buffer).
1009 * Size of this varies, due to varying MAC header length.
1010 * If end is not dword aligned, we'll have 2 extra bytes at the end
1011 * of the MAC header (device reads on dword boundaries).
1012 * We'll tell device about this padding later.
1013 */
1014 len = sizeof(struct iwl_tx_cmd) +
1015 sizeof(struct iwl_cmd_header) + hdr_len;
1016 firstlen = (len + 3) & ~3;
1017
1018 /* Tell NIC about any 2-byte padding after MAC header */
1019 if (firstlen != len)
1020 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1021
1022 /* Physical address of this Tx command's header (not MAC header!),
1023 * within command buffer array. */
1024 txcmd_phys = dma_map_single(priv->bus->dev,
1025 &dev_cmd->hdr, firstlen,
1026 DMA_BIDIRECTIONAL);
1027 if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
1028 return -1;
1029 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1030 dma_unmap_len_set(out_meta, len, firstlen);
1031
1032 if (!ieee80211_has_morefrags(fc)) {
1033 txq->need_update = 1;
1034 } else {
1035 wait_write_ptr = 1;
1036 txq->need_update = 0;
1037 }
1038
1039 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1040 * if any (802.11 null frames have no payload). */
1041 secondlen = skb->len - hdr_len;
1042 if (secondlen > 0) {
1043 phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
1044 secondlen, DMA_TO_DEVICE);
1045 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
1046 dma_unmap_single(priv->bus->dev,
1047 dma_unmap_addr(out_meta, mapping),
1048 dma_unmap_len(out_meta, len),
1049 DMA_BIDIRECTIONAL);
1050 return -1;
1051 }
1052 }
1053
1054 /* Attach buffers to TFD */
1055 iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
1056 if (secondlen > 0)
1057 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
1058 secondlen, 0);
1059
1060 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1061 offsetof(struct iwl_tx_cmd, scratch);
1062
1063 /* take back ownership of DMA buffer to enable update */
1064 dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
1065 DMA_BIDIRECTIONAL);
1066 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1067 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1068
1069 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
1070 le16_to_cpu(dev_cmd->hdr.sequence));
1071 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1072 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1073 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1074
1075 /* Set up entry for this TFD in Tx byte-count array */
1076 if (ampdu)
1077 iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
1078 le16_to_cpu(tx_cmd->len));
1079
1080 dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
1081 DMA_BIDIRECTIONAL);
1082
1083 trace_iwlwifi_dev_tx(priv,
1084 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1085 sizeof(struct iwl_tfd),
1086 &dev_cmd->hdr, firstlen,
1087 skb->data + hdr_len, secondlen);
1088
1089 /* Tell device the write index *just past* this latest filled TFD */
1090 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1091 iwl_txq_update_write_ptr(priv, txq);
1092
1093 /*
1094 * At this point the frame is "transmitted" successfully
1095 * and we will get a TX status notification eventually,
1096 * regardless of the value of ret. "ret" only indicates
1097 * whether or not we should update the write pointer.
1098 */
1099 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
1100 if (wait_write_ptr) {
1101 txq->need_update = 1;
1102 iwl_txq_update_write_ptr(priv, txq);
1103 } else {
1104 iwl_stop_queue(priv, txq);
1105 }
1106 }
1107 return 0;
1108 }
1109
1110 static void iwl_trans_kick_nic(struct iwl_priv *priv)
1111 {
1112 /* Remove all resets to allow NIC to operate */
1113 iwl_write32(priv, CSR_RESET, 0);
1114 }
1115
1116 static void iwl_trans_sync_irq(struct iwl_priv *priv)
1117 {
1118 /* wait to make sure we flush the pending tasklet */
1119 synchronize_irq(priv->bus->irq);
1120 tasklet_kill(&priv->irq_tasklet);
1121 }
1122
1123 static void iwl_trans_free(struct iwl_priv *priv)
1124 {
1125 free_irq(priv->bus->irq, priv);
1126 iwl_free_isr_ict(priv);
1127 }
1128
1129 static const struct iwl_trans_ops trans_ops = {
1130 .start_device = iwl_trans_start_device,
1131 .prepare_card_hw = iwl_trans_prepare_card_hw,
1132 .stop_device = iwl_trans_stop_device,
1133
1134 .tx_start = iwl_trans_tx_start,
1135
1136 .rx_free = iwl_trans_rx_free,
1137 .tx_free = iwl_trans_tx_free,
1138
1139 .send_cmd = iwl_send_cmd,
1140 .send_cmd_pdu = iwl_send_cmd_pdu,
1141
1142 .get_tx_cmd = iwl_trans_get_tx_cmd,
1143 .tx = iwl_trans_tx,
1144
1145 .txq_agg_disable = iwl_trans_txq_agg_disable,
1146 .txq_agg_setup = iwl_trans_txq_agg_setup,
1147
1148 .kick_nic = iwl_trans_kick_nic,
1149
1150 .sync_irq = iwl_trans_sync_irq,
1151 .free = iwl_trans_free,
1152 };
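/*
 * A minimal sketch of how this ops table is consumed; the trans_*() wrappers
 * are assumed to live in iwl-trans.h and to look roughly like this (their
 * exact signatures are defined there, not in this file):
 *
 *	static inline void trans_tx_free(struct iwl_trans *trans)
 *	{
 *		trans->ops->tx_free(trans->priv);
 *	}
 *
 * Callers such as iwl_trans_tx_alloc() above go through these wrappers
 * rather than dereferencing the ops table directly.
 */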
1153
1154 int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
1155 {
1156 int err;
1157
1158 priv->trans.ops = &trans_ops;
1159 priv->trans.priv = priv;
1160
1161 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
1162 iwl_irq_tasklet, (unsigned long)priv);
1163
1164 iwl_alloc_isr_ict(priv);
1165
1166 err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
1167 DRV_NAME, priv);
1168 if (err) {
1169 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
1170 iwl_free_isr_ict(priv);
1171 return err;
1172 }
1173
1174 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
1175
1176 return 0;
1177 }
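/*
 * Usage note (a sketch, not taken from this file): the AGN layer is expected
 * to call iwl_trans_register() once per device during setup, e.g.
 *
 *	err = iwl_trans_register(&priv->trans, priv);
 *	if (err)
 *		goto out_free;
 *
 * (the error label is hypothetical). Afterwards the upper layer drives the
 * transport through the trans_*() wrappers bound to priv->trans.ops.
 */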