/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size = 0;
	u8 power;

	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = max_power; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}
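/*
 * Sizing note (illustration, derived from the loop above): the monitor
 * buffer is a single physically contiguous allocation of BIT(power) bytes,
 * with power walked down from max_power (at most 26, i.e. 64 MB) to 11
 * (2 KB) until both alloc_pages() and dma_map_page() succeed. On a
 * fragmented system the driver may therefore end up with a much smaller
 * buffer than the firmware TLV requested, which is what the "Sorry" error
 * above reports.
 */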
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}
static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
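/*
 * The two helpers above implement the indirect "shared registers" (SHR)
 * access protocol as used in this file: the low 16 bits of
 * HEEP_CTRL_WRD_PCIEX_CTRL_REG carry the target register address and the
 * top nibble carries the opcode - 2 for a read request, 3 for a write
 * request - with the payload moved through HEEP_CTRL_WRD_PCIEX_DATA_REG.
 * (This is a summary of the code above, not of external documentation.)
 */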
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		return ret;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
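/*
 * Rough shape of the LP XTAL workaround above, for reference:
 *   1. force XTAL ON and reset the device (SHRD_HW_RST);
 *   2. bring the MAC to D0A and wait for clock stabilization;
 *   3. keep APMG XTAL active, enable LP XTAL through the indirect SHR
 *      registers and clear the delay-line clock power-up;
 *   4. enable persistence mode, drop back to D0U and release both XTAL ON
 *      requests.
 */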
int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		t = 0;
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans, &flags);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
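/*
 * Note: chunks whose destination falls inside the extended SRAM window
 * (IWL_FW_MEM_EXTENDED_START..IWL_FW_MEM_EXTENDED_END) are bracketed by
 * setting and then clearing LMPM_CHICK_EXTENDED_ADDR_SPACE; the assumption
 * encoded in the loop above is that this range is only reachable while the
 * LMPM chicken bit is set.
 */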
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}
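/*
 * Handshake illustration for the loader above: after each section the
 * cumulative bitmask written to the load-status register grows as
 * 0x1, 0x3, 0x7, ... (sec_num = (sec_num << 1) | 0x1), with CPU1 using the
 * low 16 bits and CPU2 the high 16 bits (shift_param). Writing 0xFFFF /
 * 0xFFFFFFFF afterwards tells the ucode that the corresponding CPU's image
 * is complete.
 */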
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size - 256) >>
						dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >>
						dest->end_shift);
	}
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}
struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
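/*
 * Each entry above pairs an interrupt cause with its mask register and its
 * byte offset in the IVAR table; e.g. the HW RF-kill cause lives at IVAR
 * offset 0x17 and is unmasked in CSR_MSIX_HW_INT_MASK_AD. The table is
 * consumed by iwl_pcie_map_non_rx_causes() below.
 */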
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
		iwl_clear_bit(trans, causes_list[i].mask_reg,
			      causes_list[i].cause_num);
	}
}
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
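/*
 * Illustration of the mapping above: without sharing, RX queue 0 (the
 * fallback/FBQ queue) gets its own cause on the first vector and queues
 * 1..N-1 get causes 1..N-1. When IWL_SHARED_IRQ_FIRST_RSS is set,
 * offset = 1 shifts the causes of queues >= 1 down by one and the final
 * iwl_write8() gives queue 1 the same IVAR value as queue 0, so the first
 * vector serves both the fallback queue and the first RSS queue.
 */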
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_prph(trans, UREG_CHICK,
				       UREG_CHICK_MSI_ENABLE);
		return;
	}

	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans, low_power);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans, true);
		else
			_iwl_trans_pcie_stop_device(trans, true);
	}
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				      bool reset)
{
	if (!reset) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	iwl_pcie_enable_rx_wake(trans, false);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	iwl_pcie_enable_rx_wake(trans, true);

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}
static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
					struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret, nr_online_cpus;
	u16 pci_cmd;

	if (!trans->cfg->mq_rx_supported)
		goto enable_msi;

	nr_online_cpus = num_online_cpus();
	max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= nr_online_cpus) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == nr_online_cpus + 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}
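/*
 * Worked example for the fallback logic above (hypothetical numbers): on a
 * 4-CPU system we request min(4 + 2, IWL_MAX_RX_HW_QUEUES) vectors. If the
 * OS grants all of them, the last vector becomes def_irq and nothing is
 * shared; if it grants nr_online_cpus + 1, the non-RX causes share the
 * first vector; if it grants nr_online_cpus or fewer, the first vector
 * additionally serves the first RSS queue.
 */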
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}
static const char *queue_name(struct device *dev,
			      struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}

	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	iwl_pcie_apm_init(trans);

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_trans_check_hw_rf_kill(trans);

	/* Make sure we sync here, because we'll need full access later */
	if (low_power)
		pm_runtime_resume(trans->dev);

	return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);

	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;

	trans_pcie->page_offs = trans_cfg->cb_data_offs;
	trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	if (trans->cfg->gen2)
		iwl_pcie_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	for_each_possible_cpu(i) {
		struct iwl_tso_hdr_page *p =
			per_cpu_ptr(trans_pcie->tso_hdr_page, i);

		if (p->page)
			__free_page(p->page);
	}

	free_percpu(trans_pcie->tso_hdr_page);
	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  iwl_read32(trans, CSR_GP_CNTRL));
		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
		return false;
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
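/*
 * Usage pattern (see iwl_trans_pcie_read_mem() below for a real caller):
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		... burst of iwl_read32()/iwl_write32() accesses ...
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 *
 * Holding the access keeps MAC_ACCESS_REQ set, so the device cannot go
 * back to sleep between the individual register operations.
 */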
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquiring the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
*trans
,
1962 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1965 for_each_set_bit(queue
, &txqs
, BITS_PER_LONG
) {
1966 struct iwl_txq
*txq
= trans_pcie
->txq
[queue
];
1969 spin_lock_bh(&txq
->lock
);
1973 if (txq
->frozen
== freeze
)
1976 IWL_DEBUG_TX_QUEUES(trans
, "%s TXQ %d\n",
1977 freeze
? "Freezing" : "Waking", queue
);
1979 txq
->frozen
= freeze
;
1981 if (txq
->read_ptr
== txq
->write_ptr
)
1985 if (unlikely(time_after(now
,
1986 txq
->stuck_timer
.expires
))) {
1988 * The timer should have fired, maybe it is
1989 * spinning right now on the lock.
1993 /* remember how long until the timer fires */
1994 txq
->frozen_expiry_remainder
=
1995 txq
->stuck_timer
.expires
- now
;
1996 del_timer(&txq
->stuck_timer
);
2001 * Wake a non-empty queue -> arm timer with the
2002 * remainder before it froze
2004 mod_timer(&txq
->stuck_timer
,
2005 now
+ txq
->frozen_expiry_remainder
);
2008 spin_unlock_bh(&txq
->lock
);
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans_pcie->txq[i];

		if (i == trans_pcie->cmd_queue)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}
#define IWL_FLUSH_WAIT_MS	2000

void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	if (trans->cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
			(TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
			(TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	unsigned long now = jiffies;
	u8 wr_ptr;

	if (!test_bit(txq_idx, trans_pcie->queue_used))
		return -EINVAL;

	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
	txq = trans_pcie->txq[txq_idx];
	wr_ptr = ACCESS_ONCE(txq->write_ptr);

	while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
	       !time_after(jiffies,
			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
		u8 write_ptr = ACCESS_ONCE(txq->write_ptr);

		if (WARN_ONCE(wr_ptr != write_ptr,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;

		usleep_range(1000, 2000);
	}

	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"fail to flush all tx fifo queues Q %d\n", txq_idx);
		iwl_trans_pcie_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cnt;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
		if (ret)
			break;
	}

	return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	pm_runtime_get(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}
static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
	pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);

#ifdef CONFIG_PM
	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
#endif /* CONFIG_PM */
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
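/*
 * Editorial example: DEBUGFS_READ_FILE_OPS(tx_queue) above expands to
 *
 *	static const struct file_operations iwl_dbgfs_tx_queue_ops = {
 *		.read = iwl_dbgfs_tx_queue_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * so each debugfs file below only has to supply its read/write callbacks.
 */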
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq_memory)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = trans_pcie->txq[cnt];
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, txq->read_ptr, txq->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz;

	bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
					 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	int ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char buf[100];
	int pos;

	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
			trans_pcie->debug_rfkill,
			!(iwl_read32(trans, CSR_GP_CNTRL) &
			  CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool old = trans_pcie->debug_rfkill;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
	if (ret)
		return ret;
	if (old == trans_pcie->debug_rfkill)
		return count;
	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
		 old, trans_pcie->debug_rfkill);
	iwl_pcie_handle_rfkill_irq(trans);

	return count;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
/* Create the debugfs files and directories */
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(rfkill, dir, S_IWUSR | S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
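/*
 * Editorial usage note: with CONFIG_IWLWIFI_DEBUGFS enabled, the files
 * registered above appear under the transport's debugfs directory
 * (trans->dbgfs_dir), typically somewhere below /sys/kernel/debug.
 * For example, reading tx_queue dumps the per-queue pointers, and
 * writing 0 to the interrupt file clears the ISR statistics (see
 * iwl_dbgfs_interrupt_write() above).
 */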
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans_pcie->max_tbs; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page for the free benefit */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
					     max_len,
					     DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}
#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	if (!trans->cfg->gen2)
		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
	else
		for (i = FH_MEM_LOWER_BOUND_GEN2; i < FH_MEM_UPPER_BOUND_GEN2;
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}
static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return 0;

	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_prph_no_grab(trans,
						  MON_DMARB_RD_DATA_ADDR);
	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 len = 0;

	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs;
	u32 monitor_len;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->cfg->mq_rx_supported;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
			monitor_len;
	} else {
		monitor_len = 0;
	}

	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
		dump_data = vzalloc(len);
		if (!dump_data)
			return NULL;

		data = (void *)dump_data->data;
		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
		dump_data->len = len;

		return dump_data;
	}

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	if (trans->cfg->gen2)
		len += sizeof(*data) +
		       (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
	else
		len += sizeof(*data) +
		       (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
			  & 0x0FFF;
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	/* Paged memory for gen2 HW */
	if (trans->cfg->gen2)
		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
			len += sizeof(*data) +
			       sizeof(struct iwl_fw_error_dump_paging) +
			       trans_pcie->init_dram.paging[i].size;

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->write_ptr;
	for (i = 0; i < cmdq->n_window; i++) {
		u8 idx = get_cmd_index(cmdq, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
						   trans_pcie->tfd_size * ptr);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* Paged memory for gen2 HW */
	if (trans->cfg->gen2) {
		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			dma_addr_t addr =
				trans_pcie->init_dram.paging[i].physical;
			u32 page_len = trans_pcie->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			dma_sync_single_for_cpu(trans->dev, addr, page_len,
						DMA_BIDIRECTIONAL);
			memcpy(paging->data,
			       trans_pcie->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}

	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
		return iwl_pci_fw_enter_d0i3(trans);

	return 0;
}

static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
		iwl_pci_fw_exit_d0i3(trans);
}
#endif /* CONFIG_PM_SLEEP */
#define IWL_TRANS_COMMON_OPS						\
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
	.write8 = iwl_trans_pcie_write8,				\
	.write32 = iwl_trans_pcie_write32,				\
	.read32 = iwl_trans_pcie_read32,				\
	.read_prph = iwl_trans_pcie_read_prph,				\
	.write_prph = iwl_trans_pcie_write_prph,			\
	.read_mem = iwl_trans_pcie_read_mem,				\
	.write_mem = iwl_trans_pcie_write_mem,				\
	.configure = iwl_trans_pcie_configure,				\
	.set_pmi = iwl_trans_pcie_set_pmi,				\
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
	.ref = iwl_trans_pcie_ref,					\
	.unref = iwl_trans_pcie_unref,					\
	.dump_data = iwl_trans_pcie_dump_data,				\
	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
	.d3_resume = iwl_trans_pcie_d3_resume

#ifdef CONFIG_PM_SLEEP
#define IWL_TRANS_PM_OPS						\
	.suspend = iwl_trans_pcie_suspend,				\
	.resume = iwl_trans_pcie_resume,
#else
#define IWL_TRANS_PM_OPS
#endif /* CONFIG_PM_SLEEP */
static const struct iwl_trans_ops trans_ops_pcie = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,

	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
};
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
	.start_fw = iwl_trans_pcie_gen2_start_fw,
	.stop_device = iwl_trans_pcie_gen2_stop_device,

	.send_cmd = iwl_trans_pcie_gen2_send_hcmd,

	.tx = iwl_trans_pcie_gen2_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
	.txq_free = iwl_trans_pcie_dyn_txq_free,
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	if (cfg->gen2)
		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
					&pdev->dev, cfg, &trans_ops_pcie_gen2);
	else
		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
					&pdev->dev, cfg, &trans_ops_pcie);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->tso_hdr_page) {
		ret = -ENOMEM;
		goto out_no_pci;
	}

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	if (cfg->use_tfh) {
		addr_size = 64;
		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
	} else {
		addr_size = 36;
		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
	}
	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev,
						  DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENODEV;
		goto out_no_pci;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		ret = iwl_pcie_prepare_card_hw(trans);
		if (ret) {
			IWL_WARN(trans, "Exit HW not ready\n");
			goto out_no_pci;
		}

		/*
		 * in-order to recognize C step driver should read chip version
		 * id located at the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_no_pci;
		}

		if (iwl_trans_grab_nic_access(trans, &flags)) {
			u32 hw_step;

			hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
			hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);

	iwl_pcie_set_interrupt_capa(pdev, trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	init_waitqueue_head(&trans_pcie->d0i3_waitq);

	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
	}

#ifdef CONFIG_IWLWIFI_PCIE_RTPM
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
#else
	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	free_percpu(trans_pcie->tso_hdr_page);
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}