/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include "iwl-trans.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes when the transport layer tx_free will be here */
#include "iwl-shared.h"

static int iwl_trans_rx_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = priv->bus->dev;

	memset(&priv->rxq, 0, sizeof(priv->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

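/*
 * A sketch of the layout set up above, assuming the usual iwlwifi RBD
 * convention: each entry of rxq->bd is a __le32 Read Buffer Descriptor
 * holding the DMA address of one receive buffer right-shifted by 8 bits
 * (the same shift applied to bd_dma before it is handed to the device
 * in iwl_trans_rx_hw_init() below), i.e. the device sees buffer
 * addresses in 256-byte units.
 */
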
static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				DMA_FROM_DEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

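/*
 * The pool is deliberately larger than the hardware ring: up to
 * RX_QUEUE_SIZE buffers can be owned by the device via queue[] while
 * RX_FREE_BUFFERS spares wait on rx_free, so every pool entry is
 * always on exactly one of rx_used, rx_free, or the device ring.
 */
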
static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

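/*
 * Worked out: CSR_INT_COALESCING counts in 32-usec units, so -- assuming
 * IWL_HOST_INT_TIMEOUT_DEF keeps its usual 0x40 value -- the write above
 * corresponds to 0x40 * 32 = 2048 usecs, matching the comment.
 */
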
static int iwl_rx_init(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(priv);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(priv);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(priv);

	iwl_trans_rx_hw_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static void iwl_trans_rx_free(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(priv);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(priv->bus->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

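/*
 * struct iwl_dma_ptr bundles the CPU address, bus address and size of
 * one coherent allocation, so the two helpers above always pair up,
 * as the keep-warm buffer below does:
 *
 *	if (iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE))
 *		goto error;
 *	...
 *	iwlagn_free_dma_ptr(priv, &priv->kw);
 *
 * The memset() in the free path turns a double free into a harmless
 * hit on the !ptr->addr early return.
 */
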
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;

error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address
	 */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

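/*
 * A power-of-two ring size lets an index wrap with a simple mask, for
 * example:
 *
 *	next = (index + 1) & (TFD_QUEUE_SIZE_MAX - 1);
 *
 * iwl_queue_inc_wrap()/iwl_queue_dec_wrap() rely on this property,
 * which is exactly what the BUILD_BUG_ON above pins down at compile
 * time.
 */
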
/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus->dev;
	int i;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_tx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0;
		     txq_id < priv->hw_params.max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			priv->cfg->base_params->num_of_queues, GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	trans_tx_free(&priv->trans);

	return ret;
}

static int iwl_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		trans_tx_free(&priv->trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(priv);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
{
	int ret;

	IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;
	return ret;
}

static int iwl_trans_start_device(struct iwl_priv *priv)
{
	int ret;

	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (iwl_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(priv);
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs
 * mask; must be called under priv->lock and with MAC access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static void iwl_trans_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
			IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwl_trans_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}

static void iwl_trans_stop_device(struct iwl_priv *priv)
{
	unsigned long flags;

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	trans_sync_irq(&priv->trans);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
		iwl_trans_tx_stop(priv);
		iwl_trans_rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}

static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
					       int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		return NULL;

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	dev_cmd = txq->cmd[q->write_ptr];
	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
	return &dev_cmd->cmd.tx;
}

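/*
 * The sequence field doubles as a cookie: QUEUE_TO_SEQ() puts the
 * queue id in the upper bits and INDEX_TO_SEQ() the TFD index in the
 * lower bits, so when the uCode echoes the sequence back in its Tx
 * response the driver can locate the exact TFD (and skb) to reclaim.
 */
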
static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
		struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
		struct iwl_rxon_context *ctx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
	struct iwl_cmd_meta *out_meta;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
			dma_unmap_single(priv->bus->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (ampdu)
		iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
					       le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}
	return 0;
}

static void iwl_trans_kick_nic(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

static void iwl_trans_sync_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(priv->bus->irq);
	tasklet_kill(&priv->irq_tasklet);
}

static void iwl_trans_free(struct iwl_priv *priv)
{
	free_irq(priv->bus->irq, priv);
	iwl_free_isr_ict(priv);
}

static const struct iwl_trans_ops trans_ops = {
	.start_device = iwl_trans_start_device,
	.prepare_card_hw = iwl_trans_prepare_card_hw,
	.stop_device = iwl_trans_stop_device,

	.tx_start = iwl_trans_tx_start,

	.rx_free = iwl_trans_rx_free,
	.tx_free = iwl_trans_tx_free,

	.send_cmd = iwl_send_cmd,
	.send_cmd_pdu = iwl_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_get_tx_cmd,
	.tx = iwl_trans_tx,

	.txq_agg_disable = iwl_trans_txq_agg_disable,
	.txq_agg_setup = iwl_trans_txq_agg_setup,

	.kick_nic = iwl_trans_kick_nic,

	.sync_irq = iwl_trans_sync_irq,
	.free = iwl_trans_free,
};

int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
{
	int err;

	priv->trans.ops = &trans_ops;
	priv->trans.priv = priv;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)priv);

	iwl_alloc_isr_ict(priv);

	err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
		DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
		iwl_free_isr_ict(priv);
		return err;
	}

	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);

	return 0;
}