2 * DHD Bus Module for PCIE
4 * Copyright (C) 1999-2019, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Open:>>
27 * $Id: dhd_pcie.c 802450 2019-02-01 14:05:56Z $
38 #include <hnd_debug.h>
40 #include <hnd_armtrap.h>
41 #if defined(DHD_DEBUG)
43 #endif /* defined(DHD_DEBUG) */
44 #include <dngl_stats.h>
45 #include <pcie_core.h>
48 #include <dhd_flowring.h>
49 #include <dhd_proto.h>
51 #include <dhd_debug.h>
52 #include <dhd_daemon.h>
55 #include <bcmmsgbuf.h>
59 #include <bcmendian.h>
60 #ifdef DHDTCPACK_SUPPRESS
62 #endif /* DHDTCPACK_SUPPRESS */
65 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66 #include <linux/pm_runtime.h>
67 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
69 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
71 #endif /* DEBUGGER || DHD_DSCOPE */
73 #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
75 #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
76 #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
78 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
79 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
80 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
82 /* CTO Prevention Recovery */
84 #define CTO_TO_CLEAR_WAIT_MS 10000
85 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
87 #define CTO_TO_CLEAR_WAIT_MS 1000
88 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
91 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
92 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
93 (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
95 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
96 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
97 (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
99 /* Fetch address of a member in the ring_mem structure in dongle memory */
100 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
101 (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
103 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
104 extern unsigned int system_rev
;
105 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
107 /* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
108 uint dma_ring_indices
= 0;
109 /* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
111 /* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
112 * defined in dhd_linux.c
114 bool force_trap_bad_h2d_phase
= 0;
116 int dhd_dongle_memsize
;
117 int dhd_dongle_ramsize
;
118 struct dhd_bus
*g_dhd_bus
= NULL
;
119 static int dhdpcie_checkdied(dhd_bus_t
*bus
, char *data
, uint size
);
120 static int dhdpcie_bus_readconsole(dhd_bus_t
*bus
);
121 #if defined(DHD_FW_COREDUMP)
122 static int dhdpcie_mem_dump(dhd_bus_t
*bus
);
123 #endif /* DHD_FW_COREDUMP */
125 static int dhdpcie_bus_membytes(dhd_bus_t
*bus
, bool write
, ulong address
, uint8
*data
, uint size
);
126 static int dhdpcie_bus_doiovar(dhd_bus_t
*bus
, const bcm_iovar_t
*vi
, uint32 actionid
,
127 const char *name
, void *params
,
128 int plen
, void *arg
, int len
, int val_size
);
129 static int dhdpcie_bus_lpback_req(struct dhd_bus
*bus
, uint32 intval
);
130 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus
*bus
,
131 uint32 len
, uint32 srcdelay
, uint32 destdelay
,
132 uint32 d11_lpbk
, uint32 core_num
, uint32 wait
);
133 static int dhdpcie_bus_download_state(dhd_bus_t
*bus
, bool enter
);
134 static int _dhdpcie_download_firmware(struct dhd_bus
*bus
);
135 static int dhdpcie_download_firmware(dhd_bus_t
*bus
, osl_t
*osh
);
136 static int dhdpcie_bus_write_vars(dhd_bus_t
*bus
);
137 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t
*bus
, uint32 intstatus
);
138 static bool dhdpci_bus_read_frames(dhd_bus_t
*bus
);
139 static int dhdpcie_readshared(dhd_bus_t
*bus
);
140 static void dhdpcie_init_shared_addr(dhd_bus_t
*bus
);
141 static bool dhdpcie_dongle_attach(dhd_bus_t
*bus
);
142 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t
*bus
, int mem_size
);
143 static void dhdpcie_bus_release_dongle(dhd_bus_t
*bus
, osl_t
*osh
,
144 bool dongle_isolation
, bool reset_flag
);
145 static void dhdpcie_bus_release_malloc(dhd_bus_t
*bus
, osl_t
*osh
);
146 static int dhdpcie_downloadvars(dhd_bus_t
*bus
, void *arg
, int len
);
147 static uint8
dhdpcie_bus_rtcm8(dhd_bus_t
*bus
, ulong offset
);
148 static void dhdpcie_bus_wtcm8(dhd_bus_t
*bus
, ulong offset
, uint8 data
);
149 static void dhdpcie_bus_wtcm16(dhd_bus_t
*bus
, ulong offset
, uint16 data
);
150 static uint16
dhdpcie_bus_rtcm16(dhd_bus_t
*bus
, ulong offset
);
151 static void dhdpcie_bus_wtcm32(dhd_bus_t
*bus
, ulong offset
, uint32 data
);
152 static uint32
dhdpcie_bus_rtcm32(dhd_bus_t
*bus
, ulong offset
);
153 #ifdef DHD_SUPPORT_64BIT
154 static void dhdpcie_bus_wtcm64(dhd_bus_t
*bus
, ulong offset
, uint64 data
) __attribute__ ((used
));
155 static uint64
dhdpcie_bus_rtcm64(dhd_bus_t
*bus
, ulong offset
) __attribute__ ((used
));
156 #endif /* DHD_SUPPORT_64BIT */
157 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t
*bus
, uint32 data
);
158 static void dhdpcie_bus_reg_unmap(osl_t
*osh
, volatile char *addr
, int size
);
159 static int dhdpcie_cc_nvmshadow(dhd_bus_t
*bus
, struct bcmstrbuf
*b
);
160 static void dhdpcie_fw_trap(dhd_bus_t
*bus
);
161 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t
*bus
, ring_info_t
*ring_info
);
162 static void dhdpcie_handle_mb_data(dhd_bus_t
*bus
);
163 extern void dhd_dpc_enable(dhd_pub_t
*dhdp
);
164 extern void dhd_dpc_kill(dhd_pub_t
*dhdp
);
166 #ifdef IDLE_TX_FLOW_MGMT
167 static void dhd_bus_check_idle_scan(dhd_bus_t
*bus
);
168 static void dhd_bus_idle_scan(dhd_bus_t
*bus
);
169 #endif /* IDLE_TX_FLOW_MGMT */
171 #ifdef EXYNOS_PCIE_DEBUG
172 extern void exynos_pcie_register_dump(int ch_num
);
173 #endif /* EXYNOS_PCIE_DEBUG */
175 #define PCI_VENDOR_ID_BROADCOM 0x14e4
177 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
178 #define MAX_D3_ACK_TIMEOUT 100
179 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
181 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
182 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version
, uint32 h_api_version
);
183 static void dhdpcie_cto_error_recovery(struct dhd_bus
*bus
);
185 static int dhdpcie_init_d11status(struct dhd_bus
*bus
);
187 static int dhdpcie_wrt_rnd(struct dhd_bus
*bus
);
189 extern uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t
*dhd
);
190 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t
*dhd
, uint16 max_txpost
);
192 static int dhdpcie_wrt_host_whitelist_region(struct dhd_bus
*bus
);
195 static int dhdpcie_sssr_dump(dhd_pub_t
*dhd
);
196 #endif /* DHD_SSSR_DUMP */
202 IOV_SET_DOWNLOAD_STATE
,
214 IOV_LTRSLEEPON_UNLOOAD
,
220 IOV_DUMP_RINGUPD_BLOCK
,
225 #ifdef DHD_PCIE_RUNTIMEPM
227 #endif /* DHD_PCIE_RUNTIMEPM */
234 IOV_H2D_ENABLE_TRAP_BADPHASE
,
235 IOV_H2D_TXPOST_MAX_ITEM
,
245 IOV_DNGL_CAPS
, /**< returns string with dongle capabilities */
246 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
247 IOV_GDB_SERVER
, /**< starts gdb server on given interface */
248 #endif /* DEBUGGER || DHD_DSCOPE */
252 IOV_MINIDUMP_OVERRIDE
,
253 #endif /* D2H_MINIDUMP */
254 IOV_PCIE_LAST
/**< unused IOVAR */
257 const bcm_iovar_t dhdpcie_iovars
[] = {
258 {"intr", IOV_INTR
, 0, 0, IOVT_BOOL
, 0 },
259 {"memsize", IOV_MEMSIZE
, 0, 0, IOVT_UINT32
, 0 },
260 {"dwnldstate", IOV_SET_DOWNLOAD_STATE
, 0, 0, IOVT_BOOL
, 0 },
261 {"vars", IOV_VARS
, 0, 0, IOVT_BUFFER
, 0 },
262 {"devreset", IOV_DEVRESET
, 0, 0, IOVT_UINT8
, 0 },
263 {"pcie_device_trap", IOV_FORCE_FW_TRAP
, 0, 0, 0, 0 },
264 {"pcie_lpbk", IOV_PCIE_LPBK
, 0, 0, IOVT_UINT32
, 0 },
265 {"cc_nvmshadow", IOV_CC_NVMSHADOW
, 0, 0, IOVT_BUFFER
, 0 },
266 {"ramsize", IOV_RAMSIZE
, 0, 0, IOVT_UINT32
, 0 },
267 {"ramstart", IOV_RAMSTART
, 0, 0, IOVT_UINT32
, 0 },
268 {"pcie_dmaxfer", IOV_PCIE_DMAXFER
, 0, 0, IOVT_BUFFER
, 3 * sizeof(int32
) },
269 {"pcie_suspend", IOV_PCIE_SUSPEND
, 0, 0, IOVT_UINT32
, 0 },
270 {"sleep_allowed", IOV_SLEEP_ALLOWED
, 0, 0, IOVT_BOOL
, 0 },
271 {"dngl_isolation", IOV_DONGLEISOLATION
, 0, 0, IOVT_UINT32
, 0 },
272 {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD
, 0, 0, IOVT_UINT32
, 0 },
273 {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK
, 0, 0, IOVT_BUFFER
, 0 },
274 {"dma_ring_indices", IOV_DMA_RINGINDICES
, 0, 0, IOVT_UINT32
, 0},
275 {"metadata_dbg", IOV_METADATA_DBG
, 0, 0, IOVT_BOOL
, 0 },
276 {"rx_metadata_len", IOV_RX_METADATALEN
, 0, 0, IOVT_UINT32
, 0 },
277 {"tx_metadata_len", IOV_TX_METADATALEN
, 0, 0, IOVT_UINT32
, 0 },
278 {"db1_for_mb", IOV_DB1_FOR_MB
, 0, 0, IOVT_UINT32
, 0 },
279 {"txp_thresh", IOV_TXP_THRESHOLD
, 0, 0, IOVT_UINT32
, 0 },
280 {"buzzz_dump", IOV_BUZZZ_DUMP
, 0, 0, IOVT_UINT32
, 0 },
281 {"flow_prio_map", IOV_FLOW_PRIO_MAP
, 0, 0, IOVT_UINT32
, 0 },
282 #ifdef DHD_PCIE_RUNTIMEPM
283 {"idletime", IOV_IDLETIME
, 0, 0, IOVT_INT32
, 0 },
284 #endif /* DHD_PCIE_RUNTIMEPM */
285 {"rxbound", IOV_RXBOUND
, 0, 0, IOVT_UINT32
, 0 },
286 {"txbound", IOV_TXBOUND
, 0, 0, IOVT_UINT32
, 0 },
287 {"fw_hang_report", IOV_HANGREPORT
, 0, 0, IOVT_BOOL
, 0 },
288 {"h2d_mb_data", IOV_H2D_MAILBOXDATA
, 0, 0, IOVT_UINT32
, 0 },
289 {"inforings", IOV_INFORINGS
, 0, 0, IOVT_UINT32
, 0 },
290 {"h2d_phase", IOV_H2D_PHASE
, 0, 0, IOVT_UINT32
, 0 },
291 {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE
, 0, 0,
293 {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM
, 0, 0, IOVT_UINT32
, 0 },
294 {"trap_data", IOV_TRAPDATA
, 0, 0, IOVT_BUFFER
, 0 },
295 {"trap_data_raw", IOV_TRAPDATA_RAW
, 0, 0, IOVT_BUFFER
, 0 },
296 {"cto_prevention", IOV_CTO_PREVENTION
, 0, 0, IOVT_UINT32
, 0 },
297 {"pcie_wd_reset", IOV_PCIE_WD_RESET
, 0, 0, IOVT_BOOL
, 0 },
298 {"dump_dongle", IOV_DUMP_DONGLE
, 0, 0, IOVT_BUFFER
,
299 MAX(sizeof(dump_dongle_in_t
), sizeof(dump_dongle_out_t
))},
300 {"clear_ring", IOV_CLEAR_RING
, 0, 0, IOVT_UINT32
, 0 },
301 {"idma_enable", IOV_IDMA_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
302 {"ifrm_enable", IOV_IFRM_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
303 {"dar_enable", IOV_DAR_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
304 {"cap", IOV_DNGL_CAPS
, 0, 0, IOVT_BUFFER
, 0},
305 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
306 {"gdb_server", IOV_GDB_SERVER
, 0, 0, IOVT_UINT32
, 0 },
307 #endif /* DEBUGGER || DHD_DSCOPE */
308 {"inb_dw_enable", IOV_INB_DW_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
309 {"cto_threshold", IOV_CTO_THRESHOLD
, 0, 0, IOVT_UINT32
, 0 },
311 {"minidump_override", IOV_MINIDUMP_OVERRIDE
, 0, 0, IOVT_UINT32
, 0 },
312 #endif /* D2H_MINIDUMP */
313 {NULL
, 0, 0, 0, 0, 0 }
316 #define MAX_READ_TIMEOUT 5 * 1000 * 1000
319 #define DHD_RXBOUND 64
322 #define DHD_TXBOUND 64
325 #define DHD_INFORING_BOUND 32
326 #define DHD_BTLOGRING_BOUND 32
328 uint dhd_rxbound
= DHD_RXBOUND
;
329 uint dhd_txbound
= DHD_TXBOUND
;
331 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
332 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
333 static struct dhd_gdb_bus_ops_s bus_ops
= {
334 .read_u16
= dhdpcie_bus_rtcm16
,
335 .read_u32
= dhdpcie_bus_rtcm32
,
336 .write_u32
= dhdpcie_bus_wtcm32
,
338 #endif /* DEBUGGER || DHD_DSCOPE */
/* Accessor: return the bus's flr_force_fail flag. */
341 dhd_bus_get_flr_force_fail(struct dhd_bus
*bus
)
343 return bus
->flr_force_fail
;
347 * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
348 * link with the bus driver, in order to look for or await the device.
/* Register this bus layer with the PCIe driver core (module-insertion entry point). */
351 dhd_bus_register(void)
353 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
355 return dhdpcie_bus_register();
/* Unregister this bus layer from the PCIe driver core (module-removal entry point). */
359 dhd_bus_unregister(void)
361 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
363 dhdpcie_bus_unregister();
367 /** Maps a physical register region into host virtual address space; returns a host virtual address */
369 dhdpcie_bus_reg_map(osl_t
*osh
, ulong addr
, int size
)
371 return (uint32
*)REG_MAP(addr
, size
);
/* Unmap a register region previously mapped via dhdpcie_bus_reg_map(). */
375 dhdpcie_bus_reg_unmap(osl_t
*osh
, volatile char *addr
, int size
)
382 * return H2D Doorbell registers address
383 * use DAR registers instead of enum register for corerev >= 23 (4347B0)
/* Select the H2D doorbell-0 register address: DAR register when DAR is active,
 * otherwise the legacy enum register PCIH2D_MailBox.
 */
386 dhd_bus_db0_addr_get(struct dhd_bus
*bus
)
388 uint addr
= PCIH2D_MailBox
;
389 uint dar_addr
= DAR_PCIH2D_DB0_0(bus
->sih
->buscorerev
);
391 return ((DAR_ACTIVE(bus
->dhd
)) ? dar_addr
: addr
);
/* Second H2D doorbell-0 address: DAR variant when DAR is active, else PCIH2D_MailBox_2. */
395 dhd_bus_db0_addr_2_get(struct dhd_bus
*bus
)
397 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB2_0(bus
->sih
->buscorerev
) : PCIH2D_MailBox_2
);
/* H2D doorbell-1 address: DAR variant when DAR is active, else PCIH2D_DB1. */
401 dhd_bus_db1_addr_get(struct dhd_bus
*bus
)
403 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB0_1(bus
->sih
->buscorerev
) : PCIH2D_DB1
);
/* Second H2D doorbell-1 address: DAR variant when DAR is active, else PCIH2D_DB1_1. */
407 dhd_bus_db1_addr_1_get(struct dhd_bus
*bus
)
409 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB1_1(bus
->sih
->buscorerev
) : PCIH2D_DB1_1
);
/* Common (lock-free) helper to de-assert the backplane power request.
 * Reference-counted: only the last outstanding request actually clears
 * the hardware request via si_srpwr_request().
 */
413 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus
*bus
)
418 * If multiple de-asserts, decrement ref and return
419 * Clear power request when only one pending
420 * so initial request is not removed unexpectedly
422 if (bus
->pwr_req_ref
> 1) {
427 ASSERT(bus
->pwr_req_ref
== 1);
429 if (MULTIBP_ENAB(bus
->sih
)) {
430 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
431 mask
= SRPWR_DMN1_ARMBPSD_MASK
;
433 mask
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
/* Write 0 to the selected power domains to drop the request. */
436 si_srpwr_request(bus
->sih
, mask
, 0);
437 bus
->pwr_req_ref
= 0;
/* Locked wrapper: clear the backplane power request under DHD_GENERAL_LOCK. */
441 dhd_bus_pcie_pwr_req_clear(struct dhd_bus
*bus
)
443 unsigned long flags
= 0;
445 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
446 _dhd_bus_pcie_pwr_req_clear_cmn(bus
);
447 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/* Lock-free variant: caller must already hold the appropriate lock. */
451 dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus
*bus
)
453 _dhd_bus_pcie_pwr_req_clear_cmn(bus
);
/* Common (lock-free) helper to assert the backplane power request.
 * Reference-counted: only the first request programs the hardware via
 * si_srpwr_request(); subsequent calls just bump the refcount.
 */
457 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus
*bus
)
461 /* If multiple request entries, increment reference and return */
462 if (bus
->pwr_req_ref
> 0) {
467 ASSERT(bus
->pwr_req_ref
== 0);
469 if (MULTIBP_ENAB(bus
->sih
)) {
470 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
471 mask
= SRPWR_DMN1_ARMBPSD_MASK
;
472 val
= SRPWR_DMN1_ARMBPSD_MASK
;
474 mask
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
475 val
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
/* Assert the request bits for the selected power domains. */
478 si_srpwr_request(bus
->sih
, mask
, val
);
480 bus
->pwr_req_ref
= 1;
/* Locked wrapper: assert the backplane power request under DHD_GENERAL_LOCK. */
484 dhd_bus_pcie_pwr_req(struct dhd_bus
*bus
)
486 unsigned long flags
= 0;
488 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
489 _dhd_bus_pcie_pwr_req_cmn(bus
);
490 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/* Request power for ALL power domains (PD0-PD3) at once.
 * Not reference-counted — used by the reload workaround path.
 */
494 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus
*bus
)
498 mask
= SRPWR_DMN_ALL_MASK
;
499 val
= SRPWR_DMN_ALL_MASK
;
501 si_srpwr_request(bus
->sih
, mask
, val
);
/* Workaround (WAR) for driver reload: request all power domains under lock. */
505 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus
*bus
)
507 unsigned long flags
= 0;
509 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
510 _dhd_bus_pcie_pwr_req_pd0123_cmn(bus
);
511 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/* Clear the power request for the MAC power domains only (PD2 aux, PD3 main). */
515 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(struct dhd_bus
*bus
)
519 mask
= SRPWR_DMN3_MACMAIN_MASK
| SRPWR_DMN2_MACAUX_MASK
;
521 si_srpwr_request(bus
->sih
, mask
, 0);
/* Counterpart of the reload WAR: drop the MAC-domain power requests under lock. */
525 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus
*bus
)
527 unsigned long flags
= 0;
529 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
530 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(bus
);
531 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/* Lock-free assert of the power request: caller must already hold the lock. */
535 dhd_bus_pcie_pwr_req_nolock(struct dhd_bus
*bus
)
537 _dhd_bus_pcie_pwr_req_cmn(bus
);
/* Decide whether this chip/buscore combination can use MSI interrupts.
 * Old bus cores (rev <= 14) and the listed chips are excluded.
 * NOTE(review): the branch bodies/returns are outside this view.
 */
541 dhdpcie_chip_support_msi(dhd_bus_t
*bus
)
543 DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
544 __FUNCTION__
, bus
->sih
->buscorerev
, si_chipid(bus
->sih
)));
545 if (bus
->sih
->buscorerev
<= 14 ||
546 si_chipid(bus
->sih
) == BCM4375_CHIP_ID
||
547 si_chipid(bus
->sih
) == BCM4361_CHIP_ID
||
548 si_chipid(bus
->sih
) == BCM4359_CHIP_ID
) {
556 * Called once for each hardware (dongle) instance that this DHD manages.
558 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
559 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
560 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
562 * 'tcm' is the *host* virtual address at which tcm is mapped.
/* Allocate and initialize the dhd_bus_t for one PCIe device instance.
 * Allocates the bus and the pcie shared structure, attaches the dongle
 * and the software (dhd) layer, then initializes default bus flags and
 * the D2H interrupt method. On failure the error path frees pcie_sh and
 * the bus allocation.
 */
564 int dhdpcie_bus_attach(osl_t
*osh
, dhd_bus_t
**bus_ptr
,
565 volatile char *regs
, volatile char *tcm
, void *pci_dev
)
567 dhd_bus_t
*bus
= NULL
;
570 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
573 if (!(bus
= MALLOCZ(osh
, sizeof(dhd_bus_t
)))) {
574 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__
));
575 ret
= BCME_NORESOURCE
;
582 /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
583 bus
->dev
= (struct pci_dev
*)pci_dev
;
585 dll_init(&bus
->flowring_active_list
);
586 #ifdef IDLE_TX_FLOW_MGMT
587 bus
->active_list_last_process_ts
= OSL_SYSUPTIME();
588 #endif /* IDLE_TX_FLOW_MGMT */
590 /* Attach pcie shared structure */
591 if (!(bus
->pcie_sh
= MALLOCZ(osh
, sizeof(pciedev_shared_t
)))) {
592 DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__
));
593 ret
= BCME_NORESOURCE
;
597 /* dhd_common_init(osh); */
599 if (dhdpcie_dongle_attach(bus
)) {
600 DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__
));
605 /* software resources */
606 if (!(bus
->dhd
= dhd_attach(osh
, bus
, PCMSGBUF_HDRLEN
))) {
607 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__
));
608 ret
= BCME_NORESOURCE
;
/* Default bus state/flag initialization after successful attach. */
611 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
612 bus
->db1_for_mb
= TRUE
;
613 bus
->dhd
->hang_report
= TRUE
;
614 bus
->use_mailbox
= FALSE
;
615 bus
->use_d0_inform
= FALSE
;
616 bus
->intr_enabled
= FALSE
;
617 bus
->flr_force_fail
= FALSE
;
618 /* update the dma indices if set through module parameter. */
619 if (dma_ring_indices
!= 0) {
620 dhdpcie_set_dma_ring_indices(bus
->dhd
, dma_ring_indices
);
622 /* update h2d phase support if set through module parameter */
623 bus
->dhd
->h2d_phase_supported
= h2d_phase
? TRUE
: FALSE
;
624 /* update force trap on bad phase if set through module parameter */
625 bus
->dhd
->force_dongletrap_on_bad_h2d_phase
=
626 force_trap_bad_h2d_phase
? TRUE
: FALSE
;
627 #ifdef IDLE_TX_FLOW_MGMT
628 bus
->enable_idle_flowring_mgmt
= FALSE
;
629 #endif /* IDLE_TX_FLOW_MGMT */
630 bus
->irq_registered
= FALSE
;
632 #ifdef DHD_MSI_SUPPORT
/* Use MSI only when enabled via module parameter AND supported by the chip. */
633 bus
->d2h_intr_method
= enable_msi
&& dhdpcie_chip_support_msi(bus
) ?
634 PCIE_MSI
: PCIE_INTX
;
636 bus
->d2h_intr_method
= PCIE_INTX
;
637 #endif /* DHD_MSI_SUPPORT */
639 DHD_TRACE(("%s: EXIT SUCCESS\n",
/* Error path: release partial allocations in reverse order. */
646 DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__
));
648 if (bus
&& bus
->pcie_sh
) {
649 MFREE(osh
, bus
->pcie_sh
, sizeof(pciedev_shared_t
));
653 MFREE(osh
, bus
, sizeof(dhd_bus_t
));
/* Decide (by chip id) whether CLM download should be skipped.
 * NOTE(review): switch body/returns are outside this view.
 */
660 dhd_bus_skip_clm(dhd_pub_t
*dhdp
)
662 switch (dhd_bus_chip_id(dhdp
)) {
663 case BCM4369_CHIP_ID
:
/* Accessor: chip id from the attached silicon handle (sih must be valid). */
671 dhd_bus_chip(struct dhd_bus
*bus
)
673 ASSERT(bus
->sih
!= NULL
);
674 return bus
->sih
->chip
;
/* Accessor: chip revision from the attached silicon handle (sih must be valid). */
678 dhd_bus_chiprev(struct dhd_bus
*bus
)
681 ASSERT(bus
->sih
!= NULL
);
682 return bus
->sih
->chiprev
;
/* Accessor for the bus's public dhd handle. NOTE(review): body is outside this view. */
686 dhd_bus_pub(struct dhd_bus
*bus
)
/* Accessor: silicon handle (sih) as an opaque pointer. */
692 dhd_bus_sih(struct dhd_bus
*bus
)
694 return (void *)bus
->sih
;
/* Accessor for the bus tx queue. NOTE(review): body is outside this view. */
698 dhd_bus_txq(struct dhd_bus
*bus
)
703 /** Get Chip ID version (via the dhd_pub handle rather than the bus directly) */
704 uint
dhd_bus_chip_id(dhd_pub_t
*dhdp
)
706 dhd_bus_t
*bus
= dhdp
->bus
;
707 return bus
->sih
->chip
;
710 /** Get Chip Rev ID version (via the dhd_pub handle) */
711 uint
dhd_bus_chiprev_id(dhd_pub_t
*dhdp
)
713 dhd_bus_t
*bus
= dhdp
->bus
;
714 return bus
->sih
->chiprev
;
717 /** Get Chip Pkg ID version (via the dhd_pub handle) */
718 uint
dhd_bus_chippkg_id(dhd_pub_t
*dhdp
)
720 dhd_bus_t
*bus
= dhdp
->bus
;
721 return bus
->sih
->chippkg
;
724 /* Log the latest DPC schedule time */
/* Record the timestamp (ns, local time) of the most recent DPC schedule. */
726 dhd_bus_set_dpc_sched_time(dhd_pub_t
*dhdp
)
728 dhdp
->bus
->dpc_sched_time
= OSL_LOCALTIME_NS();
731 /* Check if there are DPC scheduling errors */
/* Detect DPC scheduling anomalies by comparing recorded ISR/DPC timestamps,
 * and on detection dump extensive debug state: timestamps, interrupt state,
 * RC config-space capabilities and (when EXTENDED_PCIE_DEBUG_DUMP) AER/TLP
 * header logs plus DAR registers.
 */
733 dhd_bus_query_dpc_sched_errors(dhd_pub_t
*dhdp
)
735 dhd_bus_t
*bus
= dhdp
->bus
;
738 if (bus
->dpc_entry_time
< bus
->isr_exit_time
) {
739 /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
741 } else if (bus
->dpc_entry_time
< bus
->resched_dpc_time
) {
742 /* Kernel doesn't schedule the DPC after DHD tries to reschedule
743 * the DPC due to pending work items to be processed.
751 /* print out minimum timestamp info */
752 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
753 " isr_exit_time="SEC_USEC_FMT
754 " last_non_ours_irq_time="SEC_USEC_FMT
755 " \ndpc_entry_time="SEC_USEC_FMT
756 " dpc_exit_time="SEC_USEC_FMT
757 " dpc_sched_time="SEC_USEC_FMT
758 " resched_dpc_time="SEC_USEC_FMT
"\n",
759 GET_SEC_USEC(bus
->isr_entry_time
),
760 GET_SEC_USEC(bus
->isr_exit_time
),
761 GET_SEC_USEC(bus
->last_non_ours_irq_time
),
762 GET_SEC_USEC(bus
->dpc_entry_time
),
763 GET_SEC_USEC(bus
->dpc_exit_time
),
764 GET_SEC_USEC(bus
->dpc_sched_time
),
765 GET_SEC_USEC(bus
->resched_dpc_time
)));
766 /* Added more log to debug un-scheduling from isr */
767 DHD_ERROR(("donglereset=%d, busstate=%d instatus=0x%x intr_enabled=%d \n",
768 dhdp
->dongle_reset
, dhdp
->busstate
, bus
->intstatus
, bus
->intr_enabled
));
770 dhd_pcie_dump_rc_conf_space_cap(dhdp
);
771 #ifdef EXTENDED_PCIE_DEBUG_DUMP
772 DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
773 dhdpcie_ep_access_cap(bus
, PCIE_EXTCAP_ID_ERR
,
774 PCIE_EXTCAP_AER_UCERR_OFFSET
, TRUE
, FALSE
, 0)));
775 DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
776 "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1
,
777 dhd_pcie_config_read(bus
->osh
, PCI_TLP_HDR_LOG1
, sizeof(uint32
)),
779 dhd_pcie_config_read(bus
->osh
, PCI_TLP_HDR_LOG2
, sizeof(uint32
)),
781 dhd_pcie_config_read(bus
->osh
, PCI_TLP_HDR_LOG3
, sizeof(uint32
)),
783 dhd_pcie_config_read(bus
->osh
, PCI_TLP_HDR_LOG4
, sizeof(uint32
))));
784 if (bus
->sih
->buscorerev
>= 24) {
785 DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
786 "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL
,
787 dhd_pcie_config_read(bus
->osh
, PCIECFGREG_DEV_STATUS_CTRL
,
788 sizeof(uint32
)), PCIE_CFG_SUBSYSTEM_CONTROL
,
789 dhd_pcie_config_read(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
,
790 sizeof(uint32
)), PCIECFGREG_PML1_SUB_CTRL2
,
791 dhd_pcie_config_read(bus
->osh
, PCIECFGREG_PML1_SUB_CTRL2
,
794 DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n"));
795 DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
796 PCIDARClkCtl(bus
->sih
->buscorerev
),
797 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
798 PCIDARClkCtl(bus
->sih
->buscorerev
), 0, 0),
799 PCIDARPwrCtl(bus
->sih
->buscorerev
),
800 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
801 PCIDARPwrCtl(bus
->sih
->buscorerev
), 0, 0),
802 PCIDARH2D_DB0(bus
->sih
->buscorerev
),
803 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
804 PCIDARH2D_DB0(bus
->sih
->buscorerev
), 0, 0)));
806 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
812 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
/* Read and acknowledge the PCIe interrupt status.
 * Old bus core revs (2/4/6) use config-space registers; newer revs read
 * the PCIe core mailbox int/mask registers via si_corereg(). A read of
 * 0xffffffff from both registers indicates the device was removed or
 * the link went down, which triggers link-down handling and a hang report.
 */
814 dhdpcie_bus_intstatus(dhd_bus_t
*bus
)
816 uint32 intstatus
= 0;
819 if (bus
->bus_low_power_state
== DHD_BUS_D3_ACK_RECIEVED
) {
820 DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__
));
823 if ((bus
->sih
->buscorerev
== 6) || (bus
->sih
->buscorerev
== 4) ||
824 (bus
->sih
->buscorerev
== 2)) {
825 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
826 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, intstatus
);
829 /* this is a PCIE core register..not a config register... */
830 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, 0, 0);
832 /* this is a PCIE core register..not a config register... */
833 intmask
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_mask
, 0, 0);
834 /* If the device was removed, intstatus & intmask read back as 0xffffffff */
835 if (intstatus
== (uint32
)-1 || intmask
== (uint32
)-1) {
836 DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__
));
837 DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
838 __FUNCTION__
, intstatus
, intmask
));
839 bus
->is_linkdown
= TRUE
;
840 dhd_pcie_debug_info_dump(bus
->dhd
);
841 #ifdef CUSTOMER_HW4_DEBUG
842 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
843 #ifdef SUPPORT_LINKDOWN_RECOVERY
844 #ifdef CONFIG_ARCH_MSM
845 bus
->no_cfg_restore
= 1;
846 #endif /* CONFIG_ARCH_MSM */
847 #endif /* SUPPORT_LINKDOWN_RECOVERY */
848 bus
->dhd
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN
;
849 dhd_os_send_hang_message(bus
->dhd
);
850 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
851 #endif /* CUSTOMER_HW4_DEBUG */
855 intstatus
&= intmask
;
858 * The fourth argument to si_corereg is the "mask" fields of the register to update
859 * and the fifth field is the "value" to update. Now if we are interested in only
860 * few fields of the "mask" bit map, we should not be writing back what we read
861 * By doing so, we might clear/ack interrupts that are not handled yet.
863 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, bus
->def_intmask
,
866 intstatus
&= bus
->def_intmask
;
873 * Name: dhdpcie_bus_isr
875 * 1: IN int irq -- interrupt vector
876 * 2: IN void *arg -- handle to private data structure
878 * Status (TRUE or FALSE)
881 * Interrupt Service routine checks for the status register,
882 * disable interrupt and queue DPC if mail box interrupts are raised.
/* Interrupt service routine: validates bus state, handles CTO recovery,
 * reads/acks intstatus (skipped for MSI, where firmware clears it),
 * rejects interrupts that are not ours, then disables the host-side IRQ
 * and schedules the DPC (or runs it inline when PCIE_ISR_THREAD).
 */
885 dhdpcie_bus_isr(dhd_bus_t
*bus
)
887 uint32 intstatus
= 0;
890 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
891 /* verify argument */
893 DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__
));
897 if (bus
->dhd
->dongle_reset
) {
898 DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__
));
902 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
903 DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__
));
907 /* avoid processing of interrupts until msgbuf prot is inited */
908 if (!bus
->intr_enabled
) {
909 DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__
));
912 #ifdef DHD_PCIE_RUNTIMEPM
914 #endif /* DHD_PCIE_RUNTIMEPM */
915 if (PCIECTO_ENAB(bus
)) {
916 /* read pci_intstatus */
917 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCI_INT_STATUS
, 4);
919 if (intstatus
& PCI_CTO_INT_MASK
) {
920 /* reset backplane and cto,
921 * then access through pcie is recovered.
923 dhdpcie_cto_error_recovery(bus
);
928 if (bus
->d2h_intr_method
== PCIE_MSI
) {
929 /* For MSI, as intstatus is cleared by firmware, no need to read */
930 goto skip_intstatus_read
;
933 intstatus
= dhdpcie_bus_intstatus(bus
);
935 /* Check if the interrupt is ours or not */
936 if (intstatus
== 0) {
937 DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__
));
938 bus
->non_ours_irq_count
++;
939 bus
->last_non_ours_irq_time
= OSL_LOCALTIME_NS();
943 /* save the intstatus */
944 /* read interrupt status register!! Status bits will be cleared in DPC !! */
945 bus
->intstatus
= intstatus
;
947 /* return error for 0xFFFFFFFF */
948 if (intstatus
== (uint32
)-1) {
949 DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
950 __FUNCTION__
, intstatus
));
951 dhdpcie_disable_irq_nosync(bus
);
956 /* Overall operation:
957 * - Mask further interrupts
958 * - Read/ack intstatus
959 * - Take action based on bits and state
960 * - Reenable interrupts (as per state)
963 /* Count the interrupt call */
968 bus
->isr_intr_disable_count
++;
970 /* For Linux, Macos etc (other than NDIS) instead of disabling
971 * dongle interrupt by clearing the IntMask, disable directly
972 * interrupt from the host side, so that host will not receive
973 * any interrupts at all, even though dongle raises interrupts
975 dhdpcie_disable_irq_nosync(bus
); /* Disable interrupt!! */
979 #if defined(PCIE_ISR_THREAD)
981 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__
));
982 DHD_OS_WAKE_LOCK(bus
->dhd
);
983 while (dhd_bus_dpc(bus
));
984 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
986 bus
->dpc_sched
= TRUE
;
987 dhd_sched_dpc(bus
->dhd
); /* queue DPC now!! */
988 #endif /* defined(PCIE_ISR_THREAD) */
990 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__
));
995 DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__
));
/* Transition the device's PCIe PM power state via the PMCSR config register.
 * Validates the requested transition (no transition to a state lower than
 * a requested higher state unless going to D0), writes the new state,
 * waits the mandatory D2/D3 transition delay, then reads back to verify.
 */
1000 dhdpcie_set_pwr_state(dhd_bus_t
*bus
, uint state
)
1002 uint32 cur_state
= 0;
1004 osl_t
*osh
= bus
->osh
;
1006 pm_csr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
1007 cur_state
= pm_csr
& PCIECFGREG_PM_CSR_STATE_MASK
;
1009 if (cur_state
== state
) {
1010 DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__
, cur_state
));
1014 if (state
> PCIECFGREG_PM_CSR_STATE_D3_HOT
)
1017 /* Validate the state transition
1018 * if already in a lower power state, return error
1020 if (state
!= PCIECFGREG_PM_CSR_STATE_D0
&&
1021 cur_state
<= PCIECFGREG_PM_CSR_STATE_D3_COLD
&&
1022 cur_state
> state
) {
1023 DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__
));
1027 pm_csr
&= ~PCIECFGREG_PM_CSR_STATE_MASK
;
1030 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
), pm_csr
);
1032 /* need to wait for the specified mandatory pcie power transition delay time */
1033 if (state
== PCIECFGREG_PM_CSR_STATE_D3_HOT
||
1034 cur_state
== PCIECFGREG_PM_CSR_STATE_D3_HOT
)
1035 OSL_DELAY(DHDPCIE_PM_D3_DELAY
);
1036 else if (state
== PCIECFGREG_PM_CSR_STATE_D2
||
1037 cur_state
== PCIECFGREG_PM_CSR_STATE_D2
)
1038 OSL_DELAY(DHDPCIE_PM_D2_DELAY
);
1040 /* read back the power state and verify */
1041 pm_csr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
1042 cur_state
= pm_csr
& PCIECFGREG_PM_CSR_STATE_MASK
;
1043 if (cur_state
!= state
) {
1044 DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1045 __FUNCTION__
, cur_state
));
1048 DHD_ERROR(("%s: power transition to %u success \n",
1049 __FUNCTION__
, cur_state
));
/* Verify the device is reachable over PCIe by polling the config-space
 * vendor ID until it reads as Broadcom, retrying with a delay between reads.
 */
1056 dhdpcie_config_check(dhd_bus_t
*bus
)
1059 int ret
= BCME_ERROR
;
1061 for (i
= 0; i
< DHDPCIE_CONFIG_CHECK_RETRY_COUNT
; i
++) {
1062 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCI_CFG_VID
, sizeof(uint32
));
1063 if ((val
& 0xFFFF) == VENDOR_BROADCOM
) {
1067 OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS
* 1000);
/* Restore previously saved PCIe config space to the device:
 * standard header (skipping VID/DID), command register, optionally PMCSR,
 * MSI capability registers, PCIe express capability control/status,
 * L1 substate controls, and the BAR0/BAR1 window registers.
 * Bails out if the device is not reachable (dhdpcie_config_check fails).
 */
1074 dhdpcie_config_restore(dhd_bus_t
*bus
, bool restore_pmcsr
)
1077 osl_t
*osh
= bus
->osh
;
1079 if (BCME_OK
!= dhdpcie_config_check(bus
)) {
1083 for (i
= PCI_CFG_REV
>> 2; i
< DHDPCIE_CONFIG_HDR_SIZE
; i
++) {
1084 OSL_PCI_WRITE_CONFIG(osh
, i
<< 2, sizeof(uint32
), bus
->saved_config
.header
[i
]);
1086 OSL_PCI_WRITE_CONFIG(osh
, PCI_CFG_CMD
, sizeof(uint32
), bus
->saved_config
.header
[1]);
1089 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PM_CSR
,
1090 sizeof(uint32
), bus
->saved_config
.pmcsr
);
1092 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_CAP
, sizeof(uint32
), bus
->saved_config
.msi_cap
);
1093 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_ADDR_L
, sizeof(uint32
),
1094 bus
->saved_config
.msi_addr0
);
1095 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_ADDR_H
,
1096 sizeof(uint32
), bus
->saved_config
.msi_addr1
);
1097 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_DATA
,
1098 sizeof(uint32
), bus
->saved_config
.msi_data
);
1100 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_DEV_STATUS_CTRL
,
1101 sizeof(uint32
), bus
->saved_config
.exp_dev_ctrl_stat
);
1102 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGGEN_DEV_STATUS_CTRL2
,
1103 sizeof(uint32
), bus
->saved_config
.exp_dev_ctrl_stat2
);
1104 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_LINK_STATUS_CTRL
,
1105 sizeof(uint32
), bus
->saved_config
.exp_link_ctrl_stat
);
1106 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_LINK_STATUS_CTRL2
,
1107 sizeof(uint32
), bus
->saved_config
.exp_link_ctrl_stat2
);
1109 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL1
,
1110 sizeof(uint32
), bus
->saved_config
.l1pm0
);
1111 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL2
,
1112 sizeof(uint32
), bus
->saved_config
.l1pm1
);
1114 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR0_WIN
, sizeof(uint32
),
1115 bus
->saved_config
.bar0_win
);
1116 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR1_WIN
, sizeof(uint32
),
1117 bus
->saved_config
.bar1_win
);
/*
 * Snapshot the PCIe configuration registers that dhdpcie_config_restore()
 * rewrites: the standard header, PM_CSR, the MSI capability, PCIe
 * device/link control/status, L1 substate control and the BAR windows.
 * NOTE(review): several trailing `sizeof(uint32));` fragments and braces
 * are elided in this excerpt.
 */
dhdpcie_config_save(dhd_bus_t *bus)
	osl_t *osh = bus->osh;
	/* Don't snapshot garbage if config space is not responding */
	if (BCME_OK != dhdpcie_config_check(bus)) {
	/* Full standard header, one dword at a time */
	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
	/* Power-management control/status */
	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
	/* MSI capability: control, message address (low/high), data */
	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
	/* PCIe capability: device and link control/status */
	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
		PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
		PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
	/* L1 PM substate control */
	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
	/* Backplane address windows */
	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1169 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1170 dhd_pub_t
*link_recovery
= NULL
;
1171 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1174 dhdpcie_bus_intr_init(dhd_bus_t
*bus
)
1176 uint buscorerev
= bus
->sih
->buscorerev
;
1177 bus
->pcie_mailbox_int
= PCIMailBoxInt(buscorerev
);
1178 bus
->pcie_mailbox_mask
= PCIMailBoxMask(buscorerev
);
1179 bus
->d2h_mb_mask
= PCIE_MB_D2H_MB_MASK(buscorerev
);
1180 bus
->def_intmask
= PCIE_MB_D2H_MB_MASK(buscorerev
);
1181 if (buscorerev
< 64) {
1182 bus
->def_intmask
|= PCIE_MB_TOPCIE_FN0_0
| PCIE_MB_TOPCIE_FN0_1
;
/*
 * Toggle ASPM in the PCIe link control registers of the root complex (RC)
 * and endpoint (EP), logging the before/after values.
 * Ordering matters: enable RC before EP; disable EP before RC.
 * NOTE(review): the if (enable)/else split between the enable and disable
 * paths, and the trailing arguments of the read accesses, are elided in
 * this excerpt -- confirm against the full source.
 */
dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
	uint32 linkctrl_rc, linkctrl_ep;
	/* Read current link control values (read access) */
	linkctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
	linkctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
	DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__,
		(enable ? "ENABLE" : "DISABLE"), linkctrl_rc, linkctrl_ep));

	/* Enable only L1 ASPM (bit 1) first RC then EP */
	dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
		TRUE, (linkctrl_rc | PCIE_ASPM_L1_ENAB));
	dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
		TRUE, (linkctrl_ep | PCIE_ASPM_L1_ENAB));

	/* Disable complete ASPM (bit 1 and bit 0) first EP then RC */
	dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
		TRUE, (linkctrl_ep & (~PCIE_ASPM_ENAB)));
	dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
		TRUE, (linkctrl_rc & (~PCIE_ASPM_ENAB)));

	/* Re-read and log what the hardware actually latched */
	linkctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
	linkctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
	DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__,
		(enable ? "ENABLE" : "DISABLE"), linkctrl_rc, linkctrl_ep));
/*
 * Toggle PCIe L1 substates (L1.1/L1.2) on RC and EP via the L1SS extended
 * capability. ASPM is disabled around the update (L1SS control bits must
 * not change while ASPM is active) and re-enabled afterwards.
 * NOTE(review): the if (enable)/else split between the enable and disable
 * paths is elided in this excerpt -- confirm against the full source.
 */
dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
	uint32 l1ssctrl_rc, l1ssctrl_ep;

	/* Disable ASPM of RC and EP */
	dhd_bus_aspm_enable_rc_ep(bus, FALSE);

	/* Extended Capability Reg: read current L1SS control values */
	l1ssctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	l1ssctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__,
		(enable ? "ENABLE" : "DISABLE"), l1ssctrl_rc, l1ssctrl_ep));

	/* Enable RC then EP */
	dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
		TRUE, TRUE, (l1ssctrl_rc | PCIE_EXT_L1SS_ENAB));
	dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
		TRUE, TRUE, (l1ssctrl_ep | PCIE_EXT_L1SS_ENAB));

	/* Disable EP then RC */
	dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
		TRUE, TRUE, (l1ssctrl_ep & (~PCIE_EXT_L1SS_ENAB)));
	dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
		TRUE, TRUE, (l1ssctrl_rc & (~PCIE_EXT_L1SS_ENAB)));

	/* Re-read and log what the hardware actually latched */
	l1ssctrl_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	l1ssctrl_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
		PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
	DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__,
		(enable ? "ENABLE" : "DISABLE"), l1ssctrl_rc, l1ssctrl_ep));

	/* Enable ASPM of RC and EP */
	dhd_bus_aspm_enable_rc_ep(bus, TRUE);
/*
 * Reset the dongle: either a backplane reset (DHD_USE_BP_RESET builds) or
 * the chipcommon watchdog reset. Skipped entirely when the PCIe link is
 * down, since register access could hang the host.
 * NOTE(review): the #else separating the two reset strategies, the wd_en
 * declaration and the early-return body are elided in this excerpt.
 */
dhdpcie_dongle_reset(dhd_bus_t *bus)
#ifndef DHD_USE_BP_RESET
#endif /* !DHD_USE_BP_RESET */
	/* if the pcie link is down, watchdog reset
	 * should not be done, as it may hang
	if (bus->is_linkdown) {
#ifdef DHD_USE_BP_RESET
	dhd_bus_perform_bp_reset(bus);
	/* corerev 66: soft-subsystem-reset only PCIe function 0; other
	 * revisions reset all PCIe functions as well.
	 */
	wd_en = (bus->sih->buscorerev == 66) ? WD_SSRESET_PCIE_F0_EN :
		(WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
	pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
#endif /* DHD_USE_BP_RESET */
/*
 * Attach-time dongle discovery and initialization: verify config space,
 * attach the SI backplane handle, optionally reset the dongle, locate the
 * ARM core, determine RAM size/base per chip, and set up interrupt state.
 * Returns 0 on success, non-zero on failure.
 * NOTE(review): error-path labels, braces, returns and several case/break
 * lines are elided in this excerpt of the file.
 */
dhdpcie_dongle_attach(dhd_bus_t *bus)
	osl_t *osh = bus->osh;
	volatile void *regsva = (volatile void*)bus->regs;
	sbpcieregs_t *sbpcieregs;
	bool dongle_isolation;

	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
	link_recovery = bus->dhd;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

	bus->alp_only = TRUE;

	/* Checking PCIe bus status with reading configuration space */
	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
	if ((val & 0xFFFF) != VENDOR_BROADCOM) {
		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
	/* Device ID lives in the upper half of the VID/DID dword */
	devid = (val >> 16) & 0xFFFF;
	bus->cl_devid = devid;

	/* Set bar0 window to si_enum_base */
	dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));

	/*
	 * Checking PCI_SPROM_CONTROL register for preventing invalid address access
	 * due to switch address space from PCI_BUS to SI_BUS.
	 */
	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
	if (val == 0xffffffff) {
		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));

	/* si_attach() will provide an SI handle and scan the backplane */
	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
		&bus->vars, &bus->varsz))) {
		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));

	/* Completion-timeout (CTO) detection: multi-backplane, corerev >= 66 only */
	if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
		DHD_ERROR(("Enable CTO\n"));
		bus->cto_enable = TRUE;
		dhdpcie_cto_init(bus, bus->cto_enable);

	/*
	 * HW JIRA - CRWLPCIEGEN2-672
	 * Producer Index Feature which is used by F1 gets reset on F0 FLR
	 */
	if (bus->sih->buscorerev == 66) {
		dhdpcie_ssreset_dis_enum_rst(bus);

	/* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
	 * dhdpcie_bus_release_dongle() --> si_detach()
	 * dhdpcie_dongle_attach() --> si_attach()
	 */
	bus->pwr_req_ref = 0;

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_nolock(bus);

	/* Olympic EFI requirement - stop driver load if FW is already running
	 * need to do this here before pcie_watchdog_reset, because
	 * pcie_watchdog_reset will put the ARM back into halt state
	 */
	if (!dhdpcie_is_arm_halted(bus)) {
		DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",

	BCM_REFERENCE(dongle_isolation);

	/* Dongle reset during power on can be invoked in case of module type driver */
	if (dhd_download_fw_on_driverload) {
		/* Enable CLKREQ# */
		dhdpcie_clkreq(bus->osh, 1, 1);

	/*
	 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
	 * without checking dongle_isolation flag, but if it is called via some other path
	 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
	 */
	if (bus->dhd == NULL) {
		/* dhd_attach not yet happened, do watchdog reset */
		dongle_isolation = FALSE;
	dongle_isolation = bus->dhd->dongle_isolation;

	/*
	 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
	 * This is required to avoid spurious interrupts to the Host and bring back
	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
	 */
	if (dongle_isolation == FALSE) {
		dhdpcie_dongle_reset(bus);

	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	sbpcieregs = (sbpcieregs_t *)(bus->regs);

	/* WAR where the BAR1 window may not be sized properly */
	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
	val = R_REG(osh, &sbpcieregs->configdata);
	W_REG(osh, &sbpcieregs->configdata, val);

	/* Get info on the ARM and SOCRAM cores... */
	/* Should really be qualified by device id */
	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
		(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
		(si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
		(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
		bus->armrev = si_corerev(bus->sih);
	DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));

	/* CA7/SYSMEM chips report RAM via the SYSMEM core */
	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
		/* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
		if (!bus->ramsize_adjusted) {
			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
			switch ((uint16)bus->sih->chip) {
				/* also populate base address */
				bus->dongle_ram_base = CA7_4365_RAM_BASE;
				bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
		/* No CR4: classic SOCRAM sizing */
		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
		/* cr4 has a different way to find the RAM size from TCM's */
		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
		/* also populate base address */
		switch ((uint16)bus->sih->chip) {
		case BCM4339_CHIP_ID:
		case BCM4335_CHIP_ID:
			bus->dongle_ram_base = CR4_4335_RAM_BASE;
		case BCM4358_CHIP_ID:
		case BCM4354_CHIP_ID:
		case BCM43567_CHIP_ID:
		case BCM43569_CHIP_ID:
		case BCM4350_CHIP_ID:
		case BCM43570_CHIP_ID:
			bus->dongle_ram_base = CR4_4350_RAM_BASE;
		case BCM4360_CHIP_ID:
			bus->dongle_ram_base = CR4_4360_RAM_BASE;
		case BCM4364_CHIP_ID:
			bus->dongle_ram_base = CR4_4364_RAM_BASE;
			bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
			bus->dongle_ram_base = CR4_43602_RAM_BASE;
		case BCM4349_CHIP_GRPID:
			/* RAM based changed from 4349c0(revid=9) onwards */
			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
		case BCM4347_CHIP_ID:
		case BCM4357_CHIP_ID:
		case BCM4361_CHIP_ID:
			bus->dongle_ram_base = CR4_4347_RAM_BASE;
		case BCM4375_CHIP_ID:
		case BCM4369_CHIP_ID:
			bus->dongle_ram_base = CR4_4369_RAM_BASE;
			/* unknown chip: fall back to base 0 with a warning */
			bus->dongle_ram_base = 0;
			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
				__FUNCTION__, bus->dongle_ram_base));

	bus->ramsize = bus->orig_ramsize;
	if (dhd_dongle_memsize)
		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);

	/* Sanity: TCM mapping cannot cover more than DONGLE_TCM_MAP_SIZE */
	if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
		DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
			__FUNCTION__, bus->ramsize, bus->ramsize));

	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
		bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));

	bus->srmemsize = si_socram_srmem_size(bus->sih);

	dhdpcie_bus_intr_init(bus);

	/* Set the poll and/or interrupt flags */
	bus->intr = (bool)dhd_intr;
#ifdef DHD_DISABLE_ASPM
	dhd_bus_aspm_enable_rc_ep(bus, FALSE);
#endif /* DHD_DISABLE_ASPM */

	bus->idma_enabled = TRUE;
	bus->ifrm_enabled = TRUE;
	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear_nolock(bus);

	bus->force_bt_quiesce = TRUE;

	/* Failure path (label elided): release what was acquired */
	if (bus->sih != NULL) {
		if (MULTIBP_ENAB(bus->sih)) {
			dhd_bus_pcie_pwr_req_clear_nolock(bus);
		/* for EFI even if there is an error, load still succeeds
		 * so si_detach should not be called here, it is called during unload
		 */
		si_detach(bus->sih);
	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1539 dhpcie_bus_unmask_interrupt(dhd_bus_t
*bus
)
1541 dhdpcie_bus_cfg_write_dword(bus
, PCIIntmask
, 4, I_MB
);
1545 dhpcie_bus_mask_interrupt(dhd_bus_t
*bus
)
1547 dhdpcie_bus_cfg_write_dword(bus
, PCIIntmask
, 4, 0x0);
/* Non atomic function, caller should hold appropriate lock */
/*
 * Unmask dongle->host interrupts. No-op once a D3 ACK has been received
 * (device is entering low power) or when the link is down.
 * NOTE(review): the early-return body and the else pairing the legacy
 * (corerev 2/4/6) path with the si_corereg() path are elided in this
 * excerpt.
 */
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus && bus->sih && !bus->is_linkdown) {
		/* Skip after receiving D3 ACK */
		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
		/* Legacy cores program the PCI config Intmask; newer cores use
		 * the backplane mailbox mask register.
		 */
		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
			(bus->sih->buscorerev == 4)) {
			dhpcie_bus_unmask_interrupt(bus);
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
			bus->def_intmask, bus->def_intmask);
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
/* Non atomic function, caller should hold appropriate lock */
/*
 * Mask dongle->host interrupts. Mirror of dhdpcie_bus_intr_enable():
 * writes 0 into the mailbox mask (or the legacy PCI config Intmask).
 * NOTE(review): the early-return body and the else pairing the legacy
 * path with the si_corereg() path are elided in this excerpt.
 */
dhdpcie_bus_intr_disable(dhd_bus_t *bus)
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus && bus->sih && !bus->is_linkdown) {
		/* Skip after receiving D3 ACK */
		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
			(bus->sih->buscorerev == 4)) {
			dhpcie_bus_mask_interrupt(bus);
		/* Clear every bit of the default interrupt mask */
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
			bus->def_intmask, 0);
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
/*
 * dhdpcie_advertise_bus_cleanup advertises that clean up is under progress
 * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts
 * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for
 * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
 * they will exit from there itself without marking dhd_bus_busy_state as BUSY.
 */
dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
	unsigned long flags;

#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
#endif /* DHD_PCIE_RUNTIMEPM */

	/* Remember the watchdog period so it can be restored on re-init */
	dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
	if (dhdp->dhd_watchdog_ms_backup) {
		DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
		dhd_os_wd_timer(dhdp, 0);
	if (dhdp->busstate != DHD_BUS_DOWN) {
		DHD_GENERAL_LOCK(dhdp, flags);
		dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
		DHD_GENERAL_UNLOCK(dhdp, flags);

	/* Block until every busy context has drained (or the wait times out) */
	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
	if ((timeleft == 0) || (timeleft == 1)) {
		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
			__FUNCTION__, dhdp->dhd_bus_busy_state));
/*
 * Prepare for bus removal: mark the bus DOWN and, unless dongle isolation
 * is requested or the link is down, reset the dongle so it stops
 * generating traffic/interrupts toward the host.
 * NOTE(review): closing braces are elided in this excerpt.
 */
dhdpcie_bus_remove_prep(dhd_bus_t *bus)
	unsigned long flags;
	DHD_TRACE(("%s Enter\n", __FUNCTION__));

	DHD_GENERAL_LOCK(bus->dhd, flags);
	bus->dhd->busstate = DHD_BUS_DOWN;
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	dhd_os_sdlock(bus->dhd);

	if (bus->sih && !bus->dhd->dongle_isolation) {
		if (bus->sih->buscorerev == 66) {
			/* corerev 66 needs its power-request state reloaded first */
			dhd_bus_pcie_pwr_req_reload_war(bus);

		/* Has insmod fails after rmmod issue in Brix Android */

		/* if the pcie link is down, watchdog reset
		 * should not be done, as it may hang
		if (!bus->is_linkdown) {
			dhdpcie_dongle_reset(bus);

		/* Record the reset so release_dongle does not repeat it */
		bus->dhd->is_pcie_watchdog_reset = TRUE;

	dhd_os_sdunlock(bus->dhd);

	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1668 dhd_init_bus_lock(dhd_bus_t
*bus
)
1670 if (!bus
->bus_lock
) {
1671 bus
->bus_lock
= dhd_os_spin_lock_init(bus
->dhd
->osh
);
1676 dhd_deinit_bus_lock(dhd_bus_t
*bus
)
1678 if (bus
->bus_lock
) {
1679 dhd_os_spin_lock_deinit(bus
->dhd
->osh
, bus
->bus_lock
);
1680 bus
->bus_lock
= NULL
;
/** Detach and free everything */
/*
 * Full teardown: quiesce bus users, reset/stop the dongle, disable and free
 * the IRQ, detach the upper DHD layer, release the SI handle, unmap the
 * register/TCM windows and free all bus allocations.
 * NOTE(review): braces and several guard lines are elided in this excerpt.
 */
dhdpcie_bus_release(dhd_bus_t *bus)
	bool dongle_isolation = FALSE;
	unsigned long flags_bus;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

#if defined(DEBUGGER) || defined(DHD_DSCOPE)
#endif /* DEBUGGER || DHD_DSCOPE */
	/* Wait for Tx/Rx/IOVAR/WD contexts to drain before tearing down */
	dhdpcie_advertise_bus_cleanup(bus->dhd);
	dongle_isolation = bus->dhd->dongle_isolation;
	bus->dhd->is_pcie_watchdog_reset = FALSE;
	dhdpcie_bus_remove_prep(bus);

	/* Stop interrupts under the bus lock, then release the IRQ */
	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
	dhdpcie_bus_intr_disable(bus);
	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
	dhdpcie_free_irq(bus);

	dhd_deinit_bus_lock(bus);
	/*
	 * dhdpcie_bus_release_dongle free bus->sih handle, which is needed to
	 * access Dongle registers.
	 * dhd_detach will communicate with dongle to delete flowring ..etc.
	 * So dhdpcie_bus_release_dongle should be called only after the dhd_detach.
	 */
	dhd_detach(bus->dhd);
	dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);

	/* unmap the regs and tcm here!! */
	dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
	dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);

	dhdpcie_bus_release_malloc(bus, osh);
	/* Detach pcie shared structure */
	MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
	bus->pcie_sh = NULL;

	if (bus->console.buf != NULL) {
		MFREE(osh, bus->console.buf, bus->console.bufsize);

	/* Finally free bus info */
	MFREE(osh, bus, sizeof(dhd_bus_t));

	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
} /* dhdpcie_bus_release */
/*
 * Release the dongle-side resources: optionally reset the dongle (unless
 * isolation is requested or a watchdog reset already ran), quiesce LTR,
 * disable CLKREQ#, detach the SI handle and free the NVRAM vars buffer.
 * Skipped when the dongle is held in reset, or when the link is down.
 * NOTE(review): braces and early-return bodies are elided in this excerpt.
 */
dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
		bus->dhd, bus->dhd->dongle_reset));

	/* Nothing to do while the dongle is deliberately held in reset */
	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
		DHD_TRACE(("%s Exit\n", __FUNCTION__));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));

	/* Don't double-reset if remove_prep already issued a watchdog reset */
	if (!dongle_isolation &&
		(bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
		dhdpcie_dongle_reset(bus);

	if (bus->ltrsleep_on_unload) {
		/* Force the LTR state machine to its idle value on unload */
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);

	if (bus->sih->buscorerev == 13)
		pcie_serdes_iddqdisable(bus->osh, bus->sih,
			(sbpcieregs_t *) bus->regs);

	if (dhd_download_fw_on_driverload) {
		/* Disable CLKREQ# */
		dhdpcie_clkreq(bus->osh, 1, 0);

	if (bus->sih != NULL) {
		si_detach(bus->sih);

	if (bus->vars && bus->varsz)
		MFREE(osh, bus->vars, bus->varsz);

	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1806 dhdpcie_bus_cfg_read_dword(dhd_bus_t
*bus
, uint32 addr
, uint32 size
)
1808 uint32 data
= OSL_PCI_READ_CONFIG(bus
->osh
, addr
, size
);
1812 /** 32 bit config write */
1814 dhdpcie_bus_cfg_write_dword(dhd_bus_t
*bus
, uint32 addr
, uint32 size
, uint32 data
)
1816 OSL_PCI_WRITE_CONFIG(bus
->osh
, addr
, size
, data
);
1820 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t
*bus
, uint32 data
)
1822 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR0_WIN
, 4, data
);
1826 dhdpcie_bus_dongle_setmemsize(struct dhd_bus
*bus
, int mem_size
)
1828 int32 min_size
= DONGLE_MIN_MEMSIZE
;
1829 /* Restrict the memsize to user specified limit */
1830 DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
1831 dhd_dongle_memsize
, min_size
));
1832 if ((dhd_dongle_memsize
> min_size
) &&
1833 (dhd_dongle_memsize
< (int32
)bus
->orig_ramsize
))
1834 bus
->ramsize
= dhd_dongle_memsize
;
1838 dhdpcie_bus_release_malloc(dhd_bus_t
*bus
, osl_t
*osh
)
1840 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
1842 if (bus
->dhd
&& bus
->dhd
->dongle_reset
)
1845 if (bus
->vars
&& bus
->varsz
) {
1846 MFREE(osh
, bus
->vars
, bus
->varsz
);
1850 DHD_TRACE(("%s: Exit\n", __FUNCTION__
));
/** Stop bus module: clear pending frames, disable data flow */
/*
 * Marks the bus DOWN, disables interrupts, acknowledges any pending
 * interrupt status, and wakes ioctl waiters so they can bail out.
 * NOTE(review): braces, returns and the enforce_mutex handling are elided
 * in this excerpt.
 */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
	unsigned long flags, flags_bus;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));

	DHD_DISABLE_RUNTIME_PM(bus->dhd);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	bus->dhd->busstate = DHD_BUS_DOWN;
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	atomic_set(&bus->dhd->block_bus, TRUE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
	dhdpcie_bus_intr_disable(bus);
	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

	if (!bus->is_linkdown) {
		/* Write back the pending status bits to acknowledge them */
		status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);

	if (!dhd_download_fw_on_driverload) {
		dhd_dpc_kill(bus->dhd);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	pm_runtime_disable(dhd_bus_to_dev(bus));
	pm_runtime_set_suspended(dhd_bus_to_dev(bus));
	pm_runtime_enable(dhd_bus_to_dev(bus));
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	/* Clear rx control and wake any waiters */
	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
/**
 * Watchdog timer function.
 * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
 *
 * Skips entirely when the bus is down/suspending; otherwise marks itself
 * busy (IN_WD), periodically drains the dongle console, then clears the
 * busy flag and wakes waiters.
 * NOTE(review): braces and return statements are elided in this excerpt.
 */
bool dhd_bus_watchdog(dhd_pub_t *dhd)
	unsigned long flags;
	dhd_bus_t *bus = dhd->bus;

	DHD_GENERAL_LOCK(dhd, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
		DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
		DHD_GENERAL_UNLOCK(dhd, flags);
	DHD_BUS_BUSY_SET_IN_WD(dhd);
	DHD_GENERAL_UNLOCK(dhd, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	/* Poll for console output periodically */
	if (dhd->busstate == DHD_BUS_DATA &&
		dhd->dhd_console_ms != 0 &&
		bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
		bus->console.count += dhd_watchdog_ms;
		if (bus->console.count >= dhd->dhd_console_ms) {
			bus->console.count -= dhd->dhd_console_ms;

			if (MULTIBP_ENAB(bus->sih)) {
				dhd_bus_pcie_pwr_req(bus);

			/* Make sure backplane clock is on */
			if (dhdpcie_bus_readconsole(bus) < 0) {
				dhd->dhd_console_ms = 0; /* On error, stop trying */

			if (MULTIBP_ENAB(bus->sih)) {
				dhd_bus_pcie_pwr_req_clear(bus);

	DHD_GENERAL_LOCK(dhd, flags);
	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
	dhd_os_busbusy_wake(dhd);
	DHD_GENERAL_UNLOCK(dhd, flags);
} /* dhd_bus_watchdog */
1961 #if defined(SUPPORT_MULTIPLE_REVISION)
1962 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
1963 defined(SUPPORT_BCM4359_MIXED_MODULES)
1964 #define VENDOR_MURATA "murata"
1965 #define VENDOR_WISOL "wisol"
1966 #define VNAME_DELIM "_"
1967 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
1969 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
1971 #define MAX_EXTENSION 20
1972 #define MODULE_BCM4361_INDEX 3
1973 #define CHIP_REV_A0 1
1974 #define CHIP_REV_A1 2
1975 #define CHIP_REV_B0 3
1976 #define CHIP_REV_B1 4
1977 #define CHIP_REV_B2 5
1978 #define CHIP_REV_C0 6
1979 #define BOARD_TYPE_EPA 0x080f
1980 #define BOARD_TYPE_IPA 0x0827
1981 #define BOARD_TYPE_IPA_OLD 0x081a
1982 #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
1983 #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
1984 #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
1985 #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
1986 #define MAX_VID_LEN 8
1987 #define CIS_TUPLE_HDR_LEN 2
1988 #define CIS_TUPLE_START_ADDRESS 0x18011110
1989 #define CIS_TUPLE_END_ADDRESS 0x18011167
1990 #define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
1991 + 1) / sizeof(uint32))
1992 #define CIS_TUPLE_TAG_START 0x80
1993 #define CIS_TUPLE_TAG_VENDOR 0x81
1994 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b
1995 #define CIS_TUPLE_TAG_LENGTH 1
1996 #define NVRAM_FEM_MURATA "_murata"
1997 #define CID_FEM_MURATA "_mur_"
/* Raw OTP CIS tuple layout: a tag byte, a length byte, then `len` bytes of
 * payload.
 * NOTE(review): the tag/id and data member declarations are elided in this
 * excerpt.
 */
typedef struct cis_tuple_format {
	uint8 len;	/* total length of tag and data */
} cis_tuple_format_t;
/* Module naming entry: the OTP cid string plus the nvram and firmware
 * filename extensions to use for that module.
 * NOTE(review): the typedef header and trailing `} naming_info_t;` are
 * elided in this excerpt.
 */
	char cid_ext[MAX_EXTENSION];	/* cid string read from OTP */
	char nvram_ext[MAX_EXTENSION];	/* suffix appended to the nvram filename */
	char fw_ext[MAX_EXTENSION];	/* suffix appended to the firmware filename */
/* BCM4361 module naming table: maps OTP cid strings to nvram/firmware
 * filename extensions. Entry 0 is the empty default returned when no
 * match is found. (Closing brace elided in this excerpt.)
 */
naming_info_t bcm4361_naming_table[] = {
	{ {""}, {""}, {""} },
	{ {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
	{ {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
	{ {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
	{ {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
	{ {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
	{ {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
	{ {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
	{ {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
	{ {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
	{ {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
	{ {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
	{ {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
	{ {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
	{ {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
	{ {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
	{ {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
	{ {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
	{ {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
	{ {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
	{ {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
	{ {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
	{ {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
	{ {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
	{ {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
	{ {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
	{ {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
	{ {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
	{ {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
	{ {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
	{ {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
	{ {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
	{ {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
/*
 * Find the naming-table entry whose cid_ext is a prefix of `module_type`.
 * Returns the matching entry, or entry 0 (the empty default) when no match
 * is found or module_type is empty/NULL.
 * NOTE(review): the index_found assignment and loop break are elided in
 * this excerpt.
 */
static naming_info_t *
dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
	int index_found = 0, i = 0;

	if (module_type && strlen(module_type) > 0) {
		/* entry 0 is the default; real entries start at index 1 */
		for (i = 1; i < table_size; i++) {
			if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {

	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));

	return &table[index_found];
/*
 * Find the naming-table entry matching a cid string. The cid's leading
 * fields are skipped by advancing past MODULE_BCM4361_INDEX-1 '_'
 * delimiters before prefix-matching against cid_ext.
 * Returns the match, or entry 0 (the empty default) when none is found.
 * NOTE(review): the cid_info parameter of the signature, the ptr
 * declaration and the match bookkeeping are elided in this excerpt.
 */
static naming_info_t *
dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
	int index_found = 0, i = 0;

	/* truncate extension */
	for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
		ptr = bcmstrstr(ptr, "_");

	for (i = 1; i < table_size && ptr; i++) {
		if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {

	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));

	return &table[index_found];
/*
 * Read the BCM4361 board type and vendor ID out of the OTP CIS region.
 * First programs OTP/PMU backplane registers so the OTP is readable, then
 * reads the raw CIS words and walks the tuples looking for the VENDOR and
 * BOARDTYPE tags. Outputs go to *boardtype, vid[] and *vid_length.
 * NOTE(review): braces, returns and some declarations (e.g. totlen, len)
 * are elided in this excerpt.
 */
dhd_parse_board_information_bcm4361(dhd_bus_t *bus, int *boardtype,
	unsigned char *vid, int *vid_length)
	int boardtype_backplane_addr[] = {
		0x18010324, /* OTP Control 1 */
		0x18012618, /* PMU min resource mask */
	int boardtype_backplane_data[] = {
		0x0e4fffff /* Keep on ARMHTAVAIL */
	int int_val = 0, i = 0;
	cis_tuple_format_t *tuple;
	uint32 raw_data[CIS_TUPLE_MAX_COUNT];

	for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
		/* Write new OTP and PMU configuration */
		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
			&boardtype_backplane_data[i], FALSE) != BCME_OK) {
			DHD_ERROR(("invalid size/addr combination\n"));
		/* Read back to confirm the write took effect */
		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
			&int_val, TRUE) != BCME_OK) {
			DHD_ERROR(("invalid size/addr combination\n"));
		DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
			__FUNCTION__, boardtype_backplane_addr[i], int_val));

	/* read tuple raw data */
	for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
		if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
			sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {

	totlen = i * sizeof(uint32);
	tuple = (cis_tuple_format_t *)raw_data;

	/* check the first tuple has tag 'start' */
	if (tuple->id != CIS_TUPLE_TAG_START) {

	*vid_length = *boardtype = 0;

	/* find tagged parameter */
	while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
		(*vid_length == 0 || *boardtype == 0)) {
		if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
			(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
			/* tuple data after the tag byte is the vendor id */
			memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
			*vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
			prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
		else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
			(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
			/* found boardtype */
			*boardtype = (int)tuple->data[0];
			prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
		/* advance to the next tuple */
		tuple = (cis_tuple_format_t *)((uint8 *)tuple + (len + CIS_TUPLE_HDR_LEN));
		totlen -= (len + CIS_TUPLE_HDR_LEN);

	if (*vid_length <= 0 || *boardtype <= 0) {
		DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
			*vid_length, *boardtype));
/*
 * Select the module naming entry for this chip: parse board type and
 * vendor ID from OTP, special-case A0 silicon by board type (ePA/iPA),
 * and otherwise look the module up by its cid string. Also reports
 * whether the module uses a Murata FEM via *is_murata_fem.
 * NOTE(review): braces, returns and the else joining the A0 special case
 * to the cid lookup are elided in this excerpt.
 */
static naming_info_t *
dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
	dhd_bus_t *bus, bool *is_murata_fem)
	int board_type = 0, chip_rev = 0, vid_length = 0;
	unsigned char vid[MAX_VID_LEN];
	naming_info_t *info = &table[0];
	char *cid_info = NULL;

	if (!bus || !bus->sih) {
		DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));

	chip_rev = bus->sih->chiprev;

	if (dhd_parse_board_information_bcm4361(bus, &board_type, vid, &vid_length)
		DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));

	DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));

	/* A0 chipset has exception only */
	if (chip_rev == CHIP_REV_A0) {
		if (board_type == BOARD_TYPE_EPA) {
			info = dhd_find_naming_info(table, table_size,
				DEFAULT_CIDINFO_FOR_EPA);
		} else if ((board_type == BOARD_TYPE_IPA) ||
			(board_type == BOARD_TYPE_IPA_OLD)) {
			info = dhd_find_naming_info(table, table_size,
				DEFAULT_CIDINFO_FOR_IPA);
		/* Non-A0: resolve the module by its cid string from OTP */
		cid_info = dhd_get_cid_info(vid, vid_length);
		info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
		if (strstr(cid_info, CID_FEM_MURATA)) {
			*is_murata_fem = TRUE;
2229 static int concate_revision_bcm4358(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2232 #if defined(SUPPORT_MULTIPLE_CHIPS)
2233 char chipver_tag
[20] = "_4358";
2235 char chipver_tag
[10] = {0, };
2236 #endif /* SUPPORT_MULTIPLE_CHIPS */
2238 chiprev
= dhd_bus_chiprev(bus
);
2240 DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2241 strcat(chipver_tag
, "_a0");
2242 } else if (chiprev
== 1) {
2243 DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2244 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2245 strcat(chipver_tag
, "_a1");
2246 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2247 } else if (chiprev
== 3) {
2248 DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2249 #if defined(SUPPORT_MULTIPLE_CHIPS)
2250 strcat(chipver_tag
, "_a3");
2251 #endif /* SUPPORT_MULTIPLE_CHIPS */
2253 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev
));
2256 strcat(fw_path
, chipver_tag
);
2258 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2259 if (chiprev
== 1 || chiprev
== 3) {
2260 int ret
= dhd_check_module_b85a();
2261 if ((chiprev
== 1) && (ret
< 0)) {
2262 memset(chipver_tag
, 0x00, sizeof(chipver_tag
));
2263 strcat(chipver_tag
, "_b85");
2264 strcat(chipver_tag
, "_a1");
2268 DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__
, chipver_tag
));
2269 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2271 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2272 if (system_rev
>= 10) {
2273 DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev
));
2274 strcat(chipver_tag
, "_r10");
2276 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2277 strcat(nv_path
, chipver_tag
);
2282 static int concate_revision_bcm4359(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2285 char chipver_tag
[10] = {0, };
2286 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2287 defined(SUPPORT_BCM4359_MIXED_MODULES)
2288 char chipver_tag_nv
[20] = {0, };
2289 int module_type
= -1;
2290 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2292 chip_ver
= bus
->sih
->chiprev
;
2293 if (chip_ver
== 4) {
2294 DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2295 strncat(chipver_tag
, "_b0", strlen("_b0"));
2296 } else if (chip_ver
== 5) {
2297 DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2298 strncat(chipver_tag
, "_b1", strlen("_b1"));
2299 } else if (chip_ver
== 9) {
2300 DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2301 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2302 defined(SUPPORT_BCM4359_MIXED_MODULES)
2303 if (dhd_check_module(VENDOR_MURATA
)) {
2304 strncat(chipver_tag_nv
, VNAME_DELIM
, strlen(VNAME_DELIM
));
2305 strncat(chipver_tag_nv
, VENDOR_MURATA
, strlen(VENDOR_MURATA
));
2306 } else if (dhd_check_module(VENDOR_WISOL
)) {
2307 strncat(chipver_tag_nv
, VNAME_DELIM
, strlen(VNAME_DELIM
));
2308 strncat(chipver_tag_nv
, VENDOR_WISOL
, strlen(VENDOR_WISOL
));
2310 /* In case of SEMCO module, extra vendor string doen not need to add */
2311 strncat(chipver_tag_nv
, "_c0", strlen("_c0"));
2312 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2313 strncat(chipver_tag
, "_c0", strlen("_c0"));
2314 #if defined(CONFIG_WLAN_GRACE) || defined(CONFIG_SEC_GRACEQLTE_PROJECT) || \
2315 defined(CONFIG_SEC_LYKANLTE_PROJECT) || defined(CONFIG_SEC_KELLYLTE_PROJECT)
2316 DHD_ERROR(("----- Adding _plus string -----\n"));
2317 strncat(chipver_tag
, "_plus", strlen("_plus"));
2318 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2319 defined(SUPPORT_BCM4359_MIXED_MODULES)
2320 strncat(chipver_tag_nv
, "_plus", strlen("_plus"));
2321 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2322 #endif /* CONFIG_WLAN_GRACE || CONFIG_SEC_GRACEQLTE_PROJECT || CONFIG_SEC_LYKANLTE_PROJECT ||
2323 * CONFIG_SEC_KELLYLTE_PROJECT
2326 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver
));
2330 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2331 defined(SUPPORT_BCM4359_MIXED_MODULES)
2332 module_type
= dhd_check_module_b90();
2334 switch (module_type
) {
2335 case BCM4359_MODULE_TYPE_B90B
:
2336 strcat(fw_path
, chipver_tag
);
2338 case BCM4359_MODULE_TYPE_B90S
:
2339 strcat(fw_path
, chipver_tag
);
2340 if (!(strstr(nv_path
, VENDOR_MURATA
) || strstr(nv_path
, VENDOR_WISOL
))) {
2341 strcat(nv_path
, chipver_tag_nv
);
2343 strcat(nv_path
, chipver_tag
);
2348 * .cid.info file not exist case,
2349 * loading B90S FW force for initial MFG boot up.
2351 if (chip_ver
== 5) {
2352 strncat(fw_path
, "_b90s", strlen("_b90s"));
2354 strcat(fw_path
, chipver_tag
);
2355 strcat(nv_path
, chipver_tag
);
2358 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2359 strcat(fw_path
, chipver_tag
);
2360 strcat(nv_path
, chipver_tag
);
2361 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2366 concate_revision_bcm4361(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2369 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2370 char module_type
[MAX_VNAME_LEN
];
2371 naming_info_t
*info
= NULL
;
2372 bool is_murata_fem
= FALSE
;
2374 memset(module_type
, 0, sizeof(module_type
));
2376 if (dhd_check_module_bcm4361(module_type
,
2377 MODULE_BCM4361_INDEX
, &is_murata_fem
) == BCME_OK
) {
2378 info
= dhd_find_naming_info(bcm4361_naming_table
,
2379 ARRAYSIZE(bcm4361_naming_table
), module_type
);
2381 /* in case of .cid.info doesn't exists */
2382 info
= dhd_find_naming_info_by_chip_rev(bcm4361_naming_table
,
2383 ARRAYSIZE(bcm4361_naming_table
), bus
, &is_murata_fem
);
2386 if (bcmstrnstr(nv_path
, PATH_MAX
, "_murata", 7)) {
2387 is_murata_fem
= FALSE
;
2391 if (is_murata_fem
) {
2392 strncat(nv_path
, NVRAM_FEM_MURATA
, strlen(NVRAM_FEM_MURATA
));
2394 strncat(nv_path
, info
->nvram_ext
, strlen(info
->nvram_ext
));
2395 strncat(fw_path
, info
->fw_ext
, strlen(info
->fw_ext
));
2397 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__
));
2400 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2401 char chipver_tag
[10] = {0, };
2403 strcat(fw_path
, chipver_tag
);
2404 strcat(nv_path
, chipver_tag
);
2405 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2411 concate_revision(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2415 if (!bus
|| !bus
->sih
) {
2416 DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__
));
2420 if (!fw_path
|| !nv_path
) {
2421 DHD_ERROR(("fw_path or nv_path is null.\n"));
2425 switch (si_chipid(bus
->sih
)) {
2427 case BCM43569_CHIP_ID
:
2428 case BCM4358_CHIP_ID
:
2429 res
= concate_revision_bcm4358(bus
, fw_path
, nv_path
);
2431 case BCM4355_CHIP_ID
:
2432 case BCM4359_CHIP_ID
:
2433 res
= concate_revision_bcm4359(bus
, fw_path
, nv_path
);
2435 case BCM4361_CHIP_ID
:
2436 case BCM4347_CHIP_ID
:
2437 res
= concate_revision_bcm4361(bus
, fw_path
, nv_path
);
2440 DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2446 #endif /* SUPPORT_MULTIPLE_REVISION */
2449 dhd_get_chipid(dhd_pub_t
*dhd
)
2451 dhd_bus_t
*bus
= dhd
->bus
;
2453 if (bus
&& bus
->sih
)
2454 return (uint16
)si_chipid(bus
->sih
);
2460 * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
2462 * BCM_REQUEST_FW specific :
2463 * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
2464 * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
2466 * BCMEMBEDIMAGE specific:
2467 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2468 * file will be used instead.
2470 * @return BCME_OK on success
2473 dhd_bus_download_firmware(struct dhd_bus
*bus
, osl_t
*osh
,
2474 char *pfw_path
, char *pnv_path
)
2478 bus
->fw_path
= pfw_path
;
2479 bus
->nv_path
= pnv_path
;
2481 #if defined(SUPPORT_MULTIPLE_REVISION)
2482 if (concate_revision(bus
, bus
->fw_path
, bus
->nv_path
) != 0) {
2483 DHD_ERROR(("%s: fail to concatnate revison \n",
2487 #endif /* SUPPORT_MULTIPLE_REVISION */
2489 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2490 dhd_set_blob_support(bus
->dhd
, bus
->fw_path
);
2491 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2493 DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
2494 __FUNCTION__
, bus
->fw_path
, bus
->nv_path
));
2495 dhdpcie_dump_resource(bus
);
2497 ret
= dhdpcie_download_firmware(bus
, osh
);
2503 * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2505 * BCM_REQUEST_FW specific :
2506 * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
2507 * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
2509 * BCMEMBEDIMAGE specific:
2510 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2511 * file will be used instead.
2513 * @return BCME_OK on success
2516 dhdpcie_download_firmware(struct dhd_bus
*bus
, osl_t
*osh
)
2519 #if defined(BCM_REQUEST_FW)
2520 uint chipid
= bus
->sih
->chip
;
2521 uint revid
= bus
->sih
->chiprev
;
2522 char fw_path
[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
2523 char nv_path
[64]; /* path to nvram vars file */
2524 bus
->fw_path
= fw_path
;
2525 bus
->nv_path
= nv_path
;
2527 case BCM43570_CHIP_ID
:
2528 bcmstrncat(fw_path
, "43570", 5);
2531 bcmstrncat(fw_path
, "a0", 2);
2534 bcmstrncat(fw_path
, "a2", 2);
2537 DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__
,
2543 DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__
,
2547 /* load board specific nvram file */
2548 snprintf(bus
->nv_path
, sizeof(nv_path
), "%s.nvm", fw_path
);
2550 snprintf(bus
->fw_path
, sizeof(fw_path
), "%s-firmware.bin", fw_path
);
2551 #endif /* BCM_REQUEST_FW */
2553 DHD_OS_WAKE_LOCK(bus
->dhd
);
2554 ret
= _dhdpcie_download_firmware(bus
);
2556 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
2558 } /* dhdpcie_download_firmware */
2560 #define DHD_MEMORY_SET_PATTERN 0xAA
2563 * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
2564 * is updated with the event logging partitions within that file as well.
2566 * @param pfw_path Path to .bin or .bea file
2569 dhdpcie_download_code_file(struct dhd_bus
*bus
, char *pfw_path
)
2571 int bcmerror
= BCME_ERROR
;
2573 #if defined(DHD_FW_MEM_CORRUPTION)
2574 uint8
*p_org_fw
= NULL
;
2575 uint32 org_fw_size
= 0;
2576 uint32 fw_write_offset
= 0;
2577 #endif /* DHD_FW_MEM_CORRUPTION */
2580 char *imgbuf
= NULL
;
2581 uint8
*memblock
= NULL
, *memptr
;
2582 int offset_end
= bus
->ramsize
;
2584 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__
, pfw_path
));
2586 /* Should succeed in opening image if it is actually given through registry
2587 * entry or in module param.
2589 imgbuf
= dhd_os_open_image1(bus
->dhd
, pfw_path
);
2590 if (imgbuf
== NULL
) {
2594 memptr
= memblock
= MALLOC(bus
->dhd
->osh
, MEMBLOCK
+ DHD_SDALIGN
);
2595 if (memblock
== NULL
) {
2596 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__
, MEMBLOCK
));
2597 bcmerror
= BCME_NOMEM
;
2600 if ((uint32
)(uintptr
)memblock
% DHD_SDALIGN
) {
2601 memptr
+= (DHD_SDALIGN
- ((uint32
)(uintptr
)memblock
% DHD_SDALIGN
));
2604 #if defined(DHD_FW_MEM_CORRUPTION)
2605 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
2606 org_fw_size
= dhd_os_get_image_size(imgbuf
);
2607 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2608 p_org_fw
= (uint8
*)DHD_OS_PREALLOC(bus
->dhd
,
2609 DHD_PREALLOC_MEMDUMP_RAM
, org_fw_size
);
2611 p_org_fw
= (uint8
*)VMALLOC(bus
->dhd
->osh
, org_fw_size
);
2612 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
2613 if (p_org_fw
== NULL
) {
2614 DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n",
2615 __FUNCTION__
, org_fw_size
));
2616 bcmerror
= BCME_NOMEM
;
2619 memset(p_org_fw
, 0, org_fw_size
);
2622 #endif /* DHD_FW_MEM_CORRUPTION */
2624 /* check if CR4/CA7 */
2625 store_reset
= (si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0) ||
2626 si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0));
2627 /* Download image with MEMBLOCK size */
2628 while ((len
= dhd_os_get_image_block((char*)memptr
, MEMBLOCK
, imgbuf
))) {
2630 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__
, len
));
2631 bcmerror
= BCME_ERROR
;
2634 /* if address is 0, store the reset instruction to be written in 0 */
2636 ASSERT(offset
== 0);
2637 bus
->resetinstr
= *(((uint32
*)memptr
));
2638 /* Add start of RAM address to the address given by user */
2639 offset
+= bus
->dongle_ram_base
;
2640 offset_end
+= offset
;
2641 store_reset
= FALSE
;
2644 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, offset
, (uint8
*)memptr
, len
);
2646 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
2647 __FUNCTION__
, bcmerror
, MEMBLOCK
, offset
));
2651 #if defined(DHD_FW_MEM_CORRUPTION)
2652 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
2653 memcpy((p_org_fw
+ fw_write_offset
), memptr
, len
);
2654 fw_write_offset
+= len
;
2656 #endif /* DHD_FW_MEM_CORRUPTION */
2658 if (offset
>= offset_end
) {
2659 DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
2660 __FUNCTION__
, offset
, offset_end
));
2661 bcmerror
= BCME_ERROR
;
2665 #ifdef DHD_FW_MEM_CORRUPTION
2666 /* Read and compare the downloaded code */
2667 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
2668 unsigned char *p_readback_buf
= NULL
;
2669 uint32 compared_len
;
2670 uint32 remaining_len
= 0;
2673 p_readback_buf
= MALLOC(bus
->dhd
->osh
, MEMBLOCK
);
2674 if (p_readback_buf
== NULL
) {
2675 DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n",
2676 __FUNCTION__
, MEMBLOCK
));
2677 bcmerror
= BCME_NOMEM
;
2680 /* Read image to verify downloaded contents. */
2681 offset
= bus
->dongle_ram_base
;
2683 while (compared_len
< org_fw_size
) {
2684 memset(p_readback_buf
, DHD_MEMORY_SET_PATTERN
, MEMBLOCK
);
2685 remaining_len
= org_fw_size
- compared_len
;
2687 if (remaining_len
>= MEMBLOCK
) {
2690 len
= remaining_len
;
2692 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, offset
,
2693 (uint8
*)p_readback_buf
, len
);
2695 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2696 __FUNCTION__
, bcmerror
, MEMBLOCK
, offset
));
2700 if (memcmp((p_org_fw
+ compared_len
), p_readback_buf
, len
) != 0) {
2701 DHD_ERROR(("%s: Downloaded image is corrupted. offset %d\n",
2702 __FUNCTION__
, compared_len
));
2703 bcmerror
= BCME_ERROR
;
2707 compared_len
+= len
;
2710 DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__
));
2713 if (p_readback_buf
) {
2714 MFREE(bus
->dhd
->osh
, p_readback_buf
, MEMBLOCK
);
2717 #endif /* DHD_FW_MEM_CORRUPTION */
2720 #if defined(DHD_FW_MEM_CORRUPTION)
2722 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2723 DHD_OS_PREFREE(bus
->dhd
, p_org_fw
, org_fw_size
);
2725 VMFREE(bus
->dhd
->osh
, p_org_fw
, org_fw_size
);
2728 #endif /* DHD_FW_MEM_CORRUPTION */
2730 MFREE(bus
->dhd
->osh
, memblock
, MEMBLOCK
+ DHD_SDALIGN
);
2734 dhd_os_close_image1(bus
->dhd
, imgbuf
);
2738 } /* dhdpcie_download_code_file */
2740 #ifdef CUSTOMER_HW4_DEBUG
2741 #define MIN_NVRAMVARS_SIZE 128
2742 #endif /* CUSTOMER_HW4_DEBUG */
2745 dhdpcie_download_nvram(struct dhd_bus
*bus
)
2747 int bcmerror
= BCME_ERROR
;
2749 char * memblock
= NULL
;
2752 bool nvram_file_exists
;
2753 bool nvram_uefi_exists
= FALSE
;
2754 bool local_alloc
= FALSE
;
2755 pnv_path
= bus
->nv_path
;
2757 nvram_file_exists
= ((pnv_path
!= NULL
) && (pnv_path
[0] != '\0'));
2759 /* First try UEFI */
2760 len
= MAX_NVRAMBUF_SIZE
;
2761 dhd_get_download_buffer(bus
->dhd
, NULL
, NVRAM
, &memblock
, (int *)&len
);
2763 /* If UEFI empty, then read from file system */
2764 if ((len
<= 0) || (memblock
== NULL
)) {
2766 if (nvram_file_exists
) {
2767 len
= MAX_NVRAMBUF_SIZE
;
2768 dhd_get_download_buffer(bus
->dhd
, pnv_path
, NVRAM
, &memblock
, (int *)&len
);
2769 if ((len
<= 0 || len
> MAX_NVRAMBUF_SIZE
)) {
2774 /* For SROM OTP no external file or UEFI required */
2778 nvram_uefi_exists
= TRUE
;
2781 DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__
, len
));
2783 if (len
> 0 && len
<= MAX_NVRAMBUF_SIZE
&& memblock
!= NULL
) {
2784 bufp
= (char *) memblock
;
2788 if (nvram_uefi_exists
|| nvram_file_exists
) {
2789 len
= process_nvram_vars(bufp
, len
);
2793 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__
, len
));
2794 #ifdef CUSTOMER_HW4_DEBUG
2795 if (len
< MIN_NVRAMVARS_SIZE
) {
2796 DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
2798 bcmerror
= BCME_ERROR
;
2801 #endif /* CUSTOMER_HW4_DEBUG */
2804 len
+= 4 - (len
% 4);
2809 bcmerror
= dhdpcie_downloadvars(bus
, memblock
, len
+ 1);
2811 DHD_ERROR(("%s: error downloading vars: %d\n",
2812 __FUNCTION__
, bcmerror
));
2819 MFREE(bus
->dhd
->osh
, memblock
, MAX_NVRAMBUF_SIZE
);
2821 dhd_free_download_buffer(bus
->dhd
, memblock
, MAX_NVRAMBUF_SIZE
);
2829 dhdpcie_ramsize_read_image(struct dhd_bus
*bus
, char *buf
, int len
)
2831 int bcmerror
= BCME_ERROR
;
2832 char *imgbuf
= NULL
;
2834 if (buf
== NULL
|| len
== 0)
2837 /* External image takes precedence if specified */
2838 if ((bus
->fw_path
!= NULL
) && (bus
->fw_path
[0] != '\0')) {
2839 // opens and seeks to correct file offset:
2840 imgbuf
= dhd_os_open_image1(bus
->dhd
, bus
->fw_path
);
2841 if (imgbuf
== NULL
) {
2842 DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__
));
2847 if (len
!= dhd_os_get_image_block(buf
, len
, imgbuf
)) {
2848 DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__
, len
));
2857 dhd_os_close_image1(bus
->dhd
, imgbuf
);
2862 /* The ramsize can be changed in the dongle image, for example 4365 chip share the sysmem
2863 * with BMC and we can adjust how many sysmem belong to CA7 during dongle compilation.
2864 * So in DHD we need to detect this case and update the correct dongle RAMSIZE as well.
2867 dhdpcie_ramsize_adj(struct dhd_bus
*bus
)
2869 int i
, search_len
= 0;
2870 uint8
*memptr
= NULL
;
2871 uint8
*ramsizeptr
= NULL
;
2873 uint32 ramsize_ptr_ptr
[] = {RAMSIZE_PTR_PTR_LIST
};
2874 hnd_ramsize_ptr_t ramsize_info
;
2876 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2878 /* Adjust dongle RAMSIZE already called. */
2879 if (bus
->ramsize_adjusted
) {
2883 /* success or failure, we don't want to be here
2886 bus
->ramsize_adjusted
= TRUE
;
2888 /* Not handle if user restrict dongle ram size enabled */
2889 if (dhd_dongle_memsize
) {
2890 DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__
,
2891 dhd_dongle_memsize
));
2895 /* Out immediately if no image to download */
2896 if ((bus
->fw_path
== NULL
) || (bus
->fw_path
[0] == '\0')) {
2897 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__
));
2901 /* Get maximum RAMSIZE info search length */
2902 for (i
= 0; ; i
++) {
2903 if (ramsize_ptr_ptr
[i
] == RAMSIZE_PTR_PTR_END
)
2906 if (search_len
< (int)ramsize_ptr_ptr
[i
])
2907 search_len
= (int)ramsize_ptr_ptr
[i
];
2913 search_len
+= sizeof(hnd_ramsize_ptr_t
);
2915 memptr
= MALLOC(bus
->dhd
->osh
, search_len
);
2916 if (memptr
== NULL
) {
2917 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__
, search_len
));
2921 /* External image takes precedence if specified */
2922 if (dhdpcie_ramsize_read_image(bus
, (char *)memptr
, search_len
) != BCME_OK
) {
2926 ramsizeptr
= memptr
;
2927 ramsizelen
= search_len
;
2932 for (i
= 0; ; i
++) {
2933 if (ramsize_ptr_ptr
[i
] == RAMSIZE_PTR_PTR_END
)
2936 if (ramsize_ptr_ptr
[i
] + sizeof(hnd_ramsize_ptr_t
) > ramsizelen
)
2939 memcpy((char *)&ramsize_info
, ramsizeptr
+ ramsize_ptr_ptr
[i
],
2940 sizeof(hnd_ramsize_ptr_t
));
2942 if (ramsize_info
.magic
== HTOL32(HND_RAMSIZE_PTR_MAGIC
)) {
2943 bus
->orig_ramsize
= LTOH32(ramsize_info
.ram_size
);
2944 bus
->ramsize
= LTOH32(ramsize_info
.ram_size
);
2945 DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__
,
2954 MFREE(bus
->dhd
->osh
, memptr
, search_len
);
2957 } /* dhdpcie_ramsize_adj */
2960 * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
2962 * BCMEMBEDIMAGE specific:
2963 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2964 * file will be used instead.
2968 _dhdpcie_download_firmware(struct dhd_bus
*bus
)
2972 bool embed
= FALSE
; /* download embedded firmware */
2973 bool dlok
= FALSE
; /* download firmware succeeded */
2975 /* Out immediately if no image to download */
2976 if ((bus
->fw_path
== NULL
) || (bus
->fw_path
[0] == '\0')) {
2977 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__
));
2980 /* Adjust ram size */
2981 dhdpcie_ramsize_adj(bus
);
2983 /* Keep arm in reset */
2984 if (dhdpcie_bus_download_state(bus
, TRUE
)) {
2985 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__
));
2989 /* External image takes precedence if specified */
2990 if ((bus
->fw_path
!= NULL
) && (bus
->fw_path
[0] != '\0')) {
2991 if (dhdpcie_download_code_file(bus
, bus
->fw_path
)) {
2992 DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__
,
3001 BCM_REFERENCE(embed
);
3003 DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__
, __LINE__
));
3007 /* EXAMPLE: nvram_array */
3008 /* If a valid nvram_arry is specified as above, it can be passed down to dongle */
3009 /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3011 /* External nvram takes precedence if specified */
3012 if (dhdpcie_download_nvram(bus
)) {
3013 DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__
, __LINE__
));
3017 /* Take arm out of reset */
3018 if (dhdpcie_bus_download_state(bus
, FALSE
)) {
3019 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__
));
3027 } /* _dhdpcie_download_firmware */
3030 dhdpcie_bus_readconsole(dhd_bus_t
*bus
)
3032 dhd_console_t
*c
= &bus
->console
;
3033 uint8 line
[CONSOLE_LINE_MAX
], ch
;
3034 uint32 n
, idx
, addr
;
3039 /* Don't do anything until FWREADY updates console address */
3040 if (bus
->console_addr
== 0)
3043 /* Read console log struct */
3044 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, log
);
3046 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)&c
->log
, sizeof(c
->log
))) < 0)
3049 /* Allocate console buffer (one time only) */
3050 if (c
->buf
== NULL
) {
3051 c
->bufsize
= ltoh32(c
->log
.buf_size
);
3052 if ((c
->buf
= MALLOC(bus
->dhd
->osh
, c
->bufsize
)) == NULL
)
3054 DHD_INFO(("conlog: bufsize=0x%x\n", c
->bufsize
));
3056 idx
= ltoh32(c
->log
.idx
);
3058 /* Protect against corrupt value */
3059 if (idx
> c
->bufsize
)
3062 /* Skip reading the console buffer if the index pointer has not moved */
3066 DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c
->log
.buf
,
3069 /* Read the console buffer data to a local buffer */
3070 /* optimize and read only the portion of the buffer needed, but
3071 * important to handle wrap-around.
3073 addr
= ltoh32(c
->log
.buf
);
3075 /* wrap around case - write ptr < read ptr */
3076 if (idx
< c
->last
) {
3077 /* from read ptr to end of buffer */
3078 readlen
= c
->bufsize
- c
->last
;
3079 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3080 addr
+ c
->last
, c
->buf
, readlen
)) < 0) {
3081 DHD_ERROR(("conlog: read error[1] ! \n"));
3084 /* from beginning of buffer to write ptr */
3085 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3086 addr
, c
->buf
+ readlen
,
3088 DHD_ERROR(("conlog: read error[2] ! \n"));
3093 /* non-wraparound case, write ptr > read ptr */
3094 readlen
= (uint
)idx
- c
->last
;
3095 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3096 addr
+ c
->last
, c
->buf
, readlen
)) < 0) {
3097 DHD_ERROR(("conlog: read error[3] ! \n"));
3101 /* update read ptr */
3104 /* now output the read data from the local buffer to the host console */
3105 while (i
< readlen
) {
3106 for (n
= 0; n
< CONSOLE_LINE_MAX
- 2 && i
< readlen
; n
++) {
3115 if (line
[n
- 1] == '\r')
3118 DHD_FWLOG(("CONSOLE: %s\n", line
));
3124 } /* dhdpcie_bus_readconsole */
3127 dhd_bus_dump_console_buffer(dhd_bus_t
*bus
)
3131 char *console_buffer
= NULL
;
3132 uint32 console_ptr
, console_size
, console_index
;
3133 uint8 line
[CONSOLE_LINE_MAX
], ch
;
3136 DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__
));
3138 if (bus
->is_linkdown
) {
3139 DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__
));
3143 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
);
3144 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3145 (uint8
*)&console_ptr
, sizeof(console_ptr
))) < 0) {
3149 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
.buf_size
);
3150 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3151 (uint8
*)&console_size
, sizeof(console_size
))) < 0) {
3155 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
.idx
);
3156 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3157 (uint8
*)&console_index
, sizeof(console_index
))) < 0) {
3161 console_ptr
= ltoh32(console_ptr
);
3162 console_size
= ltoh32(console_size
);
3163 console_index
= ltoh32(console_index
);
3165 if (console_size
> CONSOLE_BUFFER_MAX
||
3166 !(console_buffer
= MALLOC(bus
->dhd
->osh
, console_size
))) {
3170 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, console_ptr
,
3171 (uint8
*)console_buffer
, console_size
)) < 0) {
3175 for (i
= 0, n
= 0; i
< console_size
; i
+= n
+ 1) {
3176 for (n
= 0; n
< CONSOLE_LINE_MAX
- 2; n
++) {
3177 ch
= console_buffer
[(console_index
+ i
+ n
) % console_size
];
3184 if (line
[n
- 1] == '\r')
3187 /* Don't use DHD_ERROR macro since we print
3188 * a lot of information quickly. The macro
3189 * will truncate a lot of the printfs
3192 DHD_FWLOG(("CONSOLE: %s\n", line
));
3198 MFREE(bus
->dhd
->osh
, console_buffer
, console_size
);
3203 * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file.
3205 * @return BCME_OK on success
3208 dhdpcie_checkdied(dhd_bus_t
*bus
, char *data
, uint size
)
3212 char *mbuffer
= NULL
;
3213 uint maxstrlen
= 256;
3215 pciedev_shared_t
*local_pciedev_shared
= bus
->pcie_sh
;
3216 struct bcmstrbuf strbuf
;
3217 unsigned long flags
;
3218 bool dongle_trap_occured
= FALSE
;
3220 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3222 if (DHD_NOCHECKDIED_ON()) {
3228 * Called after a rx ctrl timeout. "data" is NULL.
3229 * allocate memory to trace the trap or assert.
3232 mbuffer
= data
= MALLOC(bus
->dhd
->osh
, msize
);
3234 if (mbuffer
== NULL
) {
3235 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__
, msize
));
3236 bcmerror
= BCME_NOMEM
;
3241 if ((str
= MALLOC(bus
->dhd
->osh
, maxstrlen
)) == NULL
) {
3242 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__
, maxstrlen
));
3243 bcmerror
= BCME_NOMEM
;
3246 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
3247 DHD_BUS_BUSY_SET_IN_CHECKDIED(bus
->dhd
);
3248 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
3250 if (MULTIBP_ENAB(bus
->sih
)) {
3251 dhd_bus_pcie_pwr_req(bus
);
3253 if ((bcmerror
= dhdpcie_readshared(bus
)) < 0) {
3257 bcm_binit(&strbuf
, data
, size
);
3259 bcm_bprintf(&strbuf
, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
3260 local_pciedev_shared
->msgtrace_addr
, local_pciedev_shared
->console_addr
);
3262 if ((local_pciedev_shared
->flags
& PCIE_SHARED_ASSERT_BUILT
) == 0) {
3263 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3264 * (Avoids conflict with real asserts for programmatic parsing of output.)
3266 bcm_bprintf(&strbuf
, "Assrt not built in dongle\n");
3269 if ((bus
->pcie_sh
->flags
& (PCIE_SHARED_ASSERT
|PCIE_SHARED_TRAP
)) == 0) {
3270 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3271 * (Avoids conflict with real asserts for programmatic parsing of output.)
3273 bcm_bprintf(&strbuf
, "No trap%s in dongle",
3274 (bus
->pcie_sh
->flags
& PCIE_SHARED_ASSERT_BUILT
)
3277 if (bus
->pcie_sh
->flags
& PCIE_SHARED_ASSERT
) {
3278 /* Download assert */
3279 bcm_bprintf(&strbuf
, "Dongle assert");
3280 if (bus
->pcie_sh
->assert_exp_addr
!= 0) {
3282 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3283 bus
->pcie_sh
->assert_exp_addr
,
3284 (uint8
*)str
, maxstrlen
)) < 0) {
3288 str
[maxstrlen
- 1] = '\0';
3289 bcm_bprintf(&strbuf
, " expr \"%s\"", str
);
3292 if (bus
->pcie_sh
->assert_file_addr
!= 0) {
3294 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3295 bus
->pcie_sh
->assert_file_addr
,
3296 (uint8
*)str
, maxstrlen
)) < 0) {
3300 str
[maxstrlen
- 1] = '\0';
3301 bcm_bprintf(&strbuf
, " file \"%s\"", str
);
3304 bcm_bprintf(&strbuf
, " line %d ", bus
->pcie_sh
->assert_line
);
3307 if (bus
->pcie_sh
->flags
& PCIE_SHARED_TRAP
) {
3308 trap_t
*tr
= &bus
->dhd
->last_trap_info
;
3309 dongle_trap_occured
= TRUE
;
3310 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3311 bus
->pcie_sh
->trap_addr
, (uint8
*)tr
, sizeof(trap_t
))) < 0) {
3312 bus
->dhd
->dongle_trap_occured
= TRUE
;
3315 dhd_bus_dump_trap_info(bus
, &strbuf
);
3319 if (bus
->pcie_sh
->flags
& (PCIE_SHARED_ASSERT
| PCIE_SHARED_TRAP
)) {
3320 DHD_FWLOG(("%s: %s\n", __FUNCTION__
, strbuf
.origbuf
));
3322 /* wake up IOCTL wait event */
3323 dhd_wakeup_ioctl_event(bus
->dhd
, IOCTL_RETURN_ON_TRAP
);
3325 dhd_bus_dump_console_buffer(bus
);
3326 dhd_prot_debug_info_print(bus
->dhd
);
3328 #if defined(DHD_FW_COREDUMP)
3329 /* save core dump or write to a file */
3330 if (bus
->dhd
->memdump_enabled
) {
3331 #ifdef DHD_SSSR_DUMP
3332 if (bus
->dhd
->sssr_inited
) {
3333 dhdpcie_sssr_dump(bus
->dhd
);
3335 #endif /* DHD_SSSR_DUMP */
3336 bus
->dhd
->memdump_type
= DUMP_TYPE_DONGLE_TRAP
;
3337 dhdpcie_mem_dump(bus
);
3339 #endif /* DHD_FW_COREDUMP */
3341 /* set the trap occured flag only after all the memdump,
3342 * logdump and sssr dump collection has been scheduled
3344 if (dongle_trap_occured
) {
3345 bus
->dhd
->dongle_trap_occured
= TRUE
;
3348 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3349 copy_hang_info_trap(bus
->dhd
);
3350 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3351 dhd_schedule_reset(bus
->dhd
);
3355 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
3356 DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus
->dhd
);
3357 dhd_os_busbusy_wake(bus
->dhd
);
3358 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
3361 if (MULTIBP_ENAB(bus
->sih
)) {
3362 dhd_bus_pcie_pwr_req_clear(bus
);
3365 MFREE(bus
->dhd
->osh
, mbuffer
, msize
);
3367 MFREE(bus
->dhd
->osh
, str
, maxstrlen
);
3370 } /* dhdpcie_checkdied */
3372 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3373 void dhdpcie_mem_dump_bugcheck(dhd_bus_t
*bus
, uint8
*buf
)
3376 int size
; /* Full mem size */
3377 int start
; /* Start address */
3378 int read_size
= 0; /* Read size of each iteration */
3379 uint8
*databuf
= buf
;
3385 start
= bus
->dongle_ram_base
;
3387 /* check for dead bus */
3390 ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, (uint8
*)&test_word
, read_size
);
3391 /* if read error or bus timeout */
3392 if (ret
|| (test_word
== 0xFFFFFFFF)) {
3397 /* Get full mem size */
3398 size
= bus
->ramsize
;
3399 /* Read mem content */
3402 read_size
= MIN(MEMBLOCK
, size
);
3403 if ((ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, databuf
, read_size
))) {
3407 /* Decrement size and increment start address */
3410 databuf
+= read_size
;
3412 bus
->dhd
->soc_ram
= buf
;
3413 bus
->dhd
->soc_ram_length
= bus
->ramsize
;
3417 #if defined(DHD_FW_COREDUMP)
3419 dhdpcie_mem_dump(dhd_bus_t
*bus
)
3422 int size
; /* Full mem size */
3423 int start
= bus
->dongle_ram_base
; /* Start address */
3424 int read_size
= 0; /* Read size of each iteration */
3425 uint8
*buf
= NULL
, *databuf
= NULL
;
3427 #ifdef EXYNOS_PCIE_DEBUG
3428 exynos_pcie_register_dump(1);
3429 #endif /* EXYNOS_PCIE_DEBUG */
3431 #ifdef SUPPORT_LINKDOWN_RECOVERY
3432 if (bus
->is_linkdown
) {
3433 DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__
));
3436 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3438 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3439 if (pm_runtime_get_sync(dhd_bus_to_dev(bus
)) < 0)
3441 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3443 /* Get full mem size */
3444 size
= bus
->ramsize
;
3445 buf
= dhd_get_fwdump_buf(bus
->dhd
, size
);
3447 DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__
, size
));
3451 /* Read mem content */
3452 DHD_TRACE_HW4(("Dump dongle memory\n"));
3456 read_size
= MIN(MEMBLOCK
, size
);
3457 if ((ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, databuf
, read_size
)))
3459 DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__
, ret
));
3460 #ifdef DHD_DEBUG_UART
3461 bus
->dhd
->memdump_success
= FALSE
;
3462 #endif /* DHD_DEBUG_UART */
3467 /* Decrement size and increment start address */
3470 databuf
+= read_size
;
3472 #ifdef DHD_DEBUG_UART
3473 bus
->dhd
->memdump_success
= TRUE
;
3474 #endif /* DHD_DEBUG_UART */
3476 dhd_schedule_memdump(bus
->dhd
, buf
, bus
->ramsize
);
3477 /* buf, actually soc_ram free handled in dhd_{free,clear} */
3479 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3480 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
3481 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus
));
3482 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3488 dhd_bus_mem_dump(dhd_pub_t
*dhdp
)
3490 dhd_bus_t
*bus
= dhdp
->bus
;
3491 int ret
= BCME_ERROR
;
3493 if (dhdp
->busstate
== DHD_BUS_DOWN
) {
3494 DHD_ERROR(("%s bus is down\n", __FUNCTION__
));
3498 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
3499 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
3500 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
3504 DHD_OS_WAKE_LOCK(dhdp
);
3505 ret
= dhdpcie_mem_dump(bus
);
3506 DHD_OS_WAKE_UNLOCK(dhdp
);
3509 #endif /* DHD_FW_COREDUMP */
3512 dhd_socram_dump(dhd_bus_t
*bus
)
3514 #if defined(DHD_FW_COREDUMP)
3515 DHD_OS_WAKE_LOCK(bus
->dhd
);
3516 dhd_bus_mem_dump(bus
->dhd
);
3517 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
3525 * Transfers bytes from host to dongle using pio mode.
3526 * Parameter 'address' is a backplane address.
3529 dhdpcie_bus_membytes(dhd_bus_t
*bus
, bool write
, ulong address
, uint8
*data
, uint size
)
3532 int detect_endian_flag
= 0x01;
3535 if (write
&& bus
->is_linkdown
) {
3536 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
3540 if (MULTIBP_ENAB(bus
->sih
)) {
3541 dhd_bus_pcie_pwr_req(bus
);
3543 /* Detect endianness. */
3544 little_endian
= *(char *)&detect_endian_flag
;
3546 /* In remap mode, adjust address beyond socram and redirect
3547 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
3548 * is not backplane accessible
3551 /* Determine initial transfer parameters */
3552 #ifdef DHD_SUPPORT_64BIT
3553 dsize
= sizeof(uint64
);
3554 #else /* !DHD_SUPPORT_64BIT */
3555 dsize
= sizeof(uint32
);
3556 #endif /* DHD_SUPPORT_64BIT */
3558 /* Do the transfer(s) */
3561 #ifdef DHD_SUPPORT_64BIT
3562 if (size
>= sizeof(uint64
) && little_endian
&& !(address
% 8)) {
3563 dhdpcie_bus_wtcm64(bus
, address
, *((uint64
*)data
));
3565 #else /* !DHD_SUPPORT_64BIT */
3566 if (size
>= sizeof(uint32
) && little_endian
&& !(address
% 4)) {
3567 dhdpcie_bus_wtcm32(bus
, address
, *((uint32
*)data
));
3569 #endif /* DHD_SUPPORT_64BIT */
3571 dsize
= sizeof(uint8
);
3572 dhdpcie_bus_wtcm8(bus
, address
, *data
);
3575 /* Adjust for next transfer (if any) */
3576 if ((size
-= dsize
)) {
3583 #ifdef DHD_SUPPORT_64BIT
3584 if (size
>= sizeof(uint64
) && little_endian
&& !(address
% 8))
3586 *(uint64
*)data
= dhdpcie_bus_rtcm64(bus
, address
);
3588 #else /* !DHD_SUPPORT_64BIT */
3589 if (size
>= sizeof(uint32
) && little_endian
&& !(address
% 4))
3591 *(uint32
*)data
= dhdpcie_bus_rtcm32(bus
, address
);
3593 #endif /* DHD_SUPPORT_64BIT */
3595 dsize
= sizeof(uint8
);
3596 *data
= dhdpcie_bus_rtcm8(bus
, address
);
3599 /* Adjust for next transfer (if any) */
3600 if ((size
-= dsize
) > 0) {
3606 if (MULTIBP_ENAB(bus
->sih
)) {
3607 dhd_bus_pcie_pwr_req_clear(bus
);
3610 } /* dhdpcie_bus_membytes */
3613 * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
3614 * to the (non flow controlled) flow ring.
3617 dhd_bus_schedule_queue(struct dhd_bus
*bus
, uint16 flow_id
, bool txs
)
3619 flow_ring_node_t
*flow_ring_node
;
3621 #ifdef DHD_LOSSLESS_ROAMING
3622 dhd_pub_t
*dhdp
= bus
->dhd
;
3624 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__
, flow_id
));
3626 /* ASSERT on flow_id */
3627 if (flow_id
>= bus
->max_submission_rings
) {
3628 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__
,
3629 flow_id
, bus
->max_submission_rings
));
3633 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flow_id
);
3635 if (flow_ring_node
->prot_info
== NULL
) {
3636 DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__
));
3637 return BCME_NOTREADY
;
3640 #ifdef DHD_LOSSLESS_ROAMING
3641 if ((dhdp
->dequeue_prec_map
& (1 << flow_ring_node
->flow_info
.tid
)) == 0) {
3642 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
3643 __FUNCTION__
, flow_ring_node
->flow_info
.tid
));
3646 #endif /* DHD_LOSSLESS_ROAMING */
3649 unsigned long flags
;
3651 flow_queue_t
*queue
;
3652 #ifdef DHD_LOSSLESS_ROAMING
3653 struct ether_header
*eh
;
3655 #endif /* DHD_LOSSLESS_ROAMING */
3657 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
3659 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
3661 if (flow_ring_node
->status
!= FLOW_RING_STATUS_OPEN
) {
3662 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3663 return BCME_NOTREADY
;
3666 while ((txp
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
3670 * Modifying the packet length caused P2P cert failures.
3671 * Specifically on test cases where a packet of size 52 bytes
3672 * was injected, the sniffer capture showed 62 bytes because of
3673 * which the cert tests failed. So making the below change
3674 * only Router specific.
3677 #ifdef DHDTCPACK_SUPPRESS
3678 if (bus
->dhd
->tcpack_sup_mode
!= TCPACK_SUP_HOLD
) {
3679 ret
= dhd_tcpack_check_xmit(bus
->dhd
, txp
);
3680 if (ret
!= BCME_OK
) {
3681 DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
3685 #endif /* DHDTCPACK_SUPPRESS */
3686 #ifdef DHD_LOSSLESS_ROAMING
3687 pktdata
= (uint8
*)PKTDATA(OSH_NULL
, txp
);
3688 eh
= (struct ether_header
*) pktdata
;
3689 if (eh
->ether_type
== hton16(ETHER_TYPE_802_1X
)) {
3690 uint8 prio
= (uint8
)PKTPRIO(txp
);
3691 /* Restore to original priority for 802.1X packet */
3692 if (prio
== PRIO_8021D_NC
) {
3693 PKTSETPRIO(txp
, dhdp
->prio_8021x
);
3696 #endif /* DHD_LOSSLESS_ROAMING */
3697 /* Attempt to transfer packet over flow ring */
3698 ret
= dhd_prot_txdata(bus
->dhd
, txp
, flow_ring_node
->flow_info
.ifindex
);
3699 if (ret
!= BCME_OK
) { /* may not have resources in flow ring */
3700 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__
, ret
));
3701 dhd_prot_txdata_write_flush(bus
->dhd
, flow_id
);
3702 /* reinsert at head */
3703 dhd_flow_queue_reinsert(bus
->dhd
, queue
, txp
);
3704 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3706 /* If we are able to requeue back, return success */
3711 dhd_prot_txdata_write_flush(bus
->dhd
, flow_id
);
3713 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3717 } /* dhd_bus_schedule_queue */
3719 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
3721 dhd_bus_txdata(struct dhd_bus
*bus
, void *txp
, uint8 ifidx
)
3724 #ifdef IDLE_TX_FLOW_MGMT
3726 #endif /* IDLE_TX_FLOW_MGMT */
3727 flow_queue_t
*queue
;
3728 flow_ring_node_t
*flow_ring_node
;
3729 unsigned long flags
;
3731 void *txp_pend
= NULL
;
3733 if (!bus
->dhd
->flowid_allocator
) {
3734 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__
));
3738 flowid
= DHD_PKT_GET_FLOWID(txp
);
3740 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
3742 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
3743 __FUNCTION__
, flowid
, flow_ring_node
->status
, flow_ring_node
->active
));
3745 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
3746 if ((flowid
>= bus
->dhd
->num_flow_rings
) ||
3747 #ifdef IDLE_TX_FLOW_MGMT
3748 (!flow_ring_node
->active
))
3750 (!flow_ring_node
->active
) ||
3751 (flow_ring_node
->status
== FLOW_RING_STATUS_DELETE_PENDING
) ||
3752 (flow_ring_node
->status
== FLOW_RING_STATUS_STA_FREEING
))
3753 #endif /* IDLE_TX_FLOW_MGMT */
3755 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3756 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
3757 __FUNCTION__
, flowid
, flow_ring_node
->status
,
3758 flow_ring_node
->active
));
3763 #ifdef IDLE_TX_FLOW_MGMT
3764 node_status
= flow_ring_node
->status
;
3766 /* handle diffrent status states here!! */
3767 switch (node_status
)
3769 case FLOW_RING_STATUS_OPEN
:
3771 if (bus
->enable_idle_flowring_mgmt
) {
3772 /* Move the node to the head of active list */
3773 dhd_flow_ring_move_to_active_list_head(bus
, flow_ring_node
);
3777 case FLOW_RING_STATUS_SUSPENDED
:
3778 DHD_INFO(("Need to Initiate TX Flow resume\n"));
3779 /* Issue resume_ring request */
3780 dhd_bus_flow_ring_resume_request(bus
,
3784 case FLOW_RING_STATUS_CREATE_PENDING
:
3785 case FLOW_RING_STATUS_RESUME_PENDING
:
3786 /* Dont do anything here!! */
3787 DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
3791 case FLOW_RING_STATUS_DELETE_PENDING
:
3793 DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
3794 flowid
, node_status
));
3797 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3800 /* Now queue the packet */
3801 #endif /* IDLE_TX_FLOW_MGMT */
3803 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
3805 if ((ret
= dhd_flow_queue_enqueue(bus
->dhd
, queue
, txp
)) != BCME_OK
)
3808 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3810 if (flow_ring_node
->status
) {
3811 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
3812 __FUNCTION__
, flowid
, flow_ring_node
->status
,
3813 flow_ring_node
->active
));
3820 ret
= dhd_bus_schedule_queue(bus
, flowid
, FALSE
); /* from queue to flowring */
3822 /* If we have anything pending, try to push into q */
3824 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
3826 if ((ret
= dhd_flow_queue_enqueue(bus
->dhd
, queue
, txp_pend
)) != BCME_OK
) {
3827 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3832 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3838 DHD_INFO(("%s: Toss %d\n", __FUNCTION__
, ret
));
3839 PKTCFREE(bus
->dhd
->osh
, txp
, TRUE
);
3841 } /* dhd_bus_txdata */
3844 dhd_bus_stop_queue(struct dhd_bus
*bus
)
3846 dhd_txflowcontrol(bus
->dhd
, ALL_INTERFACES
, ON
);
3850 dhd_bus_start_queue(struct dhd_bus
*bus
)
3852 dhd_txflowcontrol(bus
->dhd
, ALL_INTERFACES
, OFF
);
3855 /* Device console input function */
3856 int dhd_bus_console_in(dhd_pub_t
*dhd
, uchar
*msg
, uint msglen
)
3858 dhd_bus_t
*bus
= dhd
->bus
;
3861 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
3862 if (bus
->console_addr
== 0)
3863 return BCME_UNSUPPORTED
;
3865 /* Don't allow input if dongle is in reset */
3866 if (bus
->dhd
->dongle_reset
) {
3867 return BCME_NOTREADY
;
3870 /* Zero cbuf_index */
3871 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, cbuf_idx
);
3873 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
))) < 0)
3876 /* Write message into cbuf */
3877 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, cbuf
);
3878 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)msg
, msglen
)) < 0)
3881 /* Write length into vcons_in */
3882 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, vcons_in
);
3883 val
= htol32(msglen
);
3884 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
))) < 0)
3887 /* generate an interrupt to dongle to indicate that it needs to process cons command */
3888 dhdpcie_send_mb_data(bus
, H2D_HOST_CONS_INT
);
3891 } /* dhd_bus_console_in */
3894 * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
3895 * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
3898 dhd_bus_rx_frame(struct dhd_bus
*bus
, void* pkt
, int ifidx
, uint pkt_count
)
3900 dhd_rx_frame(bus
->dhd
, ifidx
, pkt
, pkt_count
, 0);
3903 /** 'offset' is a backplane address */
3905 dhdpcie_bus_wtcm8(dhd_bus_t
*bus
, ulong offset
, uint8 data
)
3907 if (bus
->is_linkdown
) {
3908 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3911 W_REG(bus
->dhd
->osh
, (volatile uint8
*)(bus
->tcm
+ offset
), data
);
3916 dhdpcie_bus_rtcm8(dhd_bus_t
*bus
, ulong offset
)
3918 volatile uint8 data
;
3919 if (bus
->is_linkdown
) {
3920 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3923 data
= R_REG(bus
->dhd
->osh
, (volatile uint8
*)(bus
->tcm
+ offset
));
3929 dhdpcie_bus_wtcm32(dhd_bus_t
*bus
, ulong offset
, uint32 data
)
3931 if (bus
->is_linkdown
) {
3932 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3935 W_REG(bus
->dhd
->osh
, (volatile uint32
*)(bus
->tcm
+ offset
), data
);
3939 dhdpcie_bus_wtcm16(dhd_bus_t
*bus
, ulong offset
, uint16 data
)
3941 if (bus
->is_linkdown
) {
3942 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3945 W_REG(bus
->dhd
->osh
, (volatile uint16
*)(bus
->tcm
+ offset
), data
);
#ifdef DHD_SUPPORT_64BIT
/* Write one 64-bit word to TCM at backplane offset; no-op when the link is down. */
static void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}
	W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
}
#endif /* DHD_SUPPORT_64BIT */
3962 dhdpcie_bus_rtcm16(dhd_bus_t
*bus
, ulong offset
)
3964 volatile uint16 data
;
3965 if (bus
->is_linkdown
) {
3966 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3969 data
= R_REG(bus
->dhd
->osh
, (volatile uint16
*)(bus
->tcm
+ offset
));
3975 dhdpcie_bus_rtcm32(dhd_bus_t
*bus
, ulong offset
)
3977 volatile uint32 data
;
3978 if (bus
->is_linkdown
) {
3979 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3982 data
= R_REG(bus
->dhd
->osh
, (volatile uint32
*)(bus
->tcm
+ offset
));
#ifdef DHD_SUPPORT_64BIT
/* Read one 64-bit word from TCM at backplane offset; all-ones when the link is down. */
static uint64
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
	volatile uint64 data;
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint64)-1;
	} else {
		data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
	}
	return data;
}
#endif /* DHD_SUPPORT_64BIT */
4002 /** A snippet of dongle memory is shared between host and dongle */
4004 dhd_bus_cmn_writeshared(dhd_bus_t
*bus
, void *data
, uint32 len
, uint8 type
, uint16 ringid
)
4007 ulong addr
; /* dongle address */
4009 DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__
, type
, len
));
4011 if (bus
->is_linkdown
) {
4012 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
4016 if (MULTIBP_ENAB(bus
->sih
)) {
4017 dhd_bus_pcie_pwr_req(bus
);
4020 case D2H_DMA_SCRATCH_BUF
:
4021 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_dma_scratch_buffer
);
4022 long_data
= HTOL64(*(uint64
*)data
);
4023 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4024 if (dhd_msg_level
& DHD_INFO_VAL
) {
4025 prhex(__FUNCTION__
, data
, len
);
4029 case D2H_DMA_SCRATCH_BUF_LEN
:
4030 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_dma_scratch_buffer_len
);
4031 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4032 if (dhd_msg_level
& DHD_INFO_VAL
) {
4033 prhex(__FUNCTION__
, data
, len
);
4037 case H2D_DMA_INDX_WR_BUF
:
4038 long_data
= HTOL64(*(uint64
*)data
);
4039 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, h2d_w_idx_hostaddr
);
4040 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4041 if (dhd_msg_level
& DHD_INFO_VAL
) {
4042 prhex(__FUNCTION__
, data
, len
);
4046 case H2D_DMA_INDX_RD_BUF
:
4047 long_data
= HTOL64(*(uint64
*)data
);
4048 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, h2d_r_idx_hostaddr
);
4049 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4050 if (dhd_msg_level
& DHD_INFO_VAL
) {
4051 prhex(__FUNCTION__
, data
, len
);
4055 case D2H_DMA_INDX_WR_BUF
:
4056 long_data
= HTOL64(*(uint64
*)data
);
4057 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, d2h_w_idx_hostaddr
);
4058 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4059 if (dhd_msg_level
& DHD_INFO_VAL
) {
4060 prhex(__FUNCTION__
, data
, len
);
4064 case D2H_DMA_INDX_RD_BUF
:
4065 long_data
= HTOL64(*(uint64
*)data
);
4066 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, d2h_r_idx_hostaddr
);
4067 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4068 if (dhd_msg_level
& DHD_INFO_VAL
) {
4069 prhex(__FUNCTION__
, data
, len
);
4073 case H2D_IFRM_INDX_WR_BUF
:
4074 long_data
= HTOL64(*(uint64
*)data
);
4075 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, ifrm_w_idx_hostaddr
);
4076 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4077 if (dhd_msg_level
& DHD_INFO_VAL
) {
4078 prhex(__FUNCTION__
, data
, len
);
4082 case RING_ITEM_LEN
:
4083 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, len_items
);
4084 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4087 case RING_MAX_ITEMS
:
4088 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, max_item
);
4089 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4092 case RING_BUF_ADDR
:
4093 long_data
= HTOL64(*(uint64
*)data
);
4094 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, base_addr
);
4095 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4096 if (dhd_msg_level
& DHD_INFO_VAL
) {
4097 prhex(__FUNCTION__
, data
, len
);
4102 addr
= bus
->ring_sh
[ringid
].ring_state_w
;
4103 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4107 addr
= bus
->ring_sh
[ringid
].ring_state_r
;
4108 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4112 addr
= bus
->d2h_mb_data_ptr_addr
;
4113 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4117 addr
= bus
->h2d_mb_data_ptr_addr
;
4118 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4121 case HOST_API_VERSION
:
4122 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_cap
);
4123 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4126 case DNGL_TO_HOST_TRAP_ADDR
:
4127 long_data
= HTOL64(*(uint64
*)data
);
4128 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_trap_addr
);
4129 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4130 DHD_INFO(("Wrote trap addr:0x%x\n", (uint32
) HTOL32(*(uint32
*)data
)));
4134 case DNGL_TO_HOST_TRAP_ADDR_LEN
:
4135 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, device_trap_debug_buffer_len
);
4136 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4138 #endif /* D2H_MINIDUMP */
4143 if (MULTIBP_ENAB(bus
->sih
)) {
4144 dhd_bus_pcie_pwr_req_clear(bus
);
4146 } /* dhd_bus_cmn_writeshared */
4148 /** A snippet of dongle memory is shared between host and dongle */
4150 dhd_bus_cmn_readshared(dhd_bus_t
*bus
, void* data
, uint8 type
, uint16 ringid
)
4152 ulong addr
; /* dongle address */
4154 if (MULTIBP_ENAB(bus
->sih
)) {
4155 dhd_bus_pcie_pwr_req(bus
);
4159 addr
= bus
->ring_sh
[ringid
].ring_state_w
;
4160 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4164 addr
= bus
->ring_sh
[ringid
].ring_state_r
;
4165 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4168 case TOTAL_LFRAG_PACKET_CNT
:
4169 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, total_lfrag_pkt_cnt
);
4170 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4174 addr
= bus
->h2d_mb_data_ptr_addr
;
4175 *(uint32
*)data
= LTOH32(dhdpcie_bus_rtcm32(bus
, addr
));
4179 addr
= bus
->d2h_mb_data_ptr_addr
;
4180 *(uint32
*)data
= LTOH32(dhdpcie_bus_rtcm32(bus
, addr
));
4183 case MAX_HOST_RXBUFS
:
4184 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, max_host_rxbufs
);
4185 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4191 if (MULTIBP_ENAB(bus
->sih
)) {
4192 dhd_bus_pcie_pwr_req_clear(bus
);
4196 uint32
dhd_bus_get_sharedflags(dhd_bus_t
*bus
)
4198 return ((pciedev_shared_t
*)bus
->pcie_sh
)->flags
;
4202 dhd_bus_clearcounts(dhd_pub_t
*dhdp
)
4207 * @param params input buffer, NULL for 'set' operation.
4208 * @param plen length of 'params' buffer, 0 for 'set' operation.
4209 * @param arg output buffer
4212 dhd_bus_iovar_op(dhd_pub_t
*dhdp
, const char *name
,
4213 void *params
, int plen
, void *arg
, int len
, bool set
)
4215 dhd_bus_t
*bus
= dhdp
->bus
;
4216 const bcm_iovar_t
*vi
= NULL
;
4217 int bcmerror
= BCME_UNSUPPORTED
;
4221 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4225 if (!name
|| len
< 0)
4228 /* Get MUST have return space */
4229 ASSERT(set
|| (arg
&& len
));
4230 if (!(set
|| (arg
&& len
)))
4233 /* Set does NOT take qualifiers */
4234 ASSERT(!set
|| (!params
&& !plen
));
4235 if (!(!set
|| (!params
&& !plen
)))
4238 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__
,
4239 name
, (set
? "set" : "get"), len
, plen
));
4241 if (MULTIBP_ENAB(bus
->sih
)) {
4242 dhd_bus_pcie_pwr_req(bus
);
4245 /* Look up var locally; if not found pass to host driver */
4246 if ((vi
= bcm_iovar_lookup(dhdpcie_iovars
, name
)) == NULL
) {
4250 /* set up 'params' pointer in case this is a set command so that
4251 * the convenience int and bool code can be common to set and get
4253 if (params
== NULL
) {
4258 if (vi
->type
== IOVT_VOID
)
4260 else if (vi
->type
== IOVT_BUFFER
)
4263 /* all other types are integer sized */
4264 val_size
= sizeof(int);
4266 actionid
= set
? IOV_SVAL(vi
->varid
) : IOV_GVAL(vi
->varid
);
4267 bcmerror
= dhdpcie_bus_doiovar(bus
, vi
, actionid
, name
, params
, plen
, arg
, len
, val_size
);
4270 /* In DEVRESET_QUIESCE/DEVRESET_ON,
4271 * this includes dongle re-attach which initialize pwr_req_ref count to 0 and
4272 * causes pwr_req_ref count miss-match in pwr req clear function and hang.
4273 * In this case, bypass pwr req clear.
4275 if (bcmerror
== BCME_DNGL_DEVRESET
) {
4278 if (MULTIBP_ENAB(bus
->sih
)) {
4279 dhd_bus_pcie_pwr_req_clear(bus
);
4283 } /* dhd_bus_iovar_op */
4286 #include <bcm_buzzz.h>
4289 dhd_buzzz_dump_cntrs(char *p
, uint32
*core
, uint32
*log
,
4290 const int num_counters
)
4294 uint32 curr
[BCM_BUZZZ_COUNTERS_MAX
], prev
[BCM_BUZZZ_COUNTERS_MAX
];
4295 uint32 delta
[BCM_BUZZZ_COUNTERS_MAX
];
4297 /* Compute elapsed counter values per counter event type */
4298 for (ctr
= 0U; ctr
< num_counters
; ctr
++) {
4299 prev
[ctr
] = core
[ctr
];
4301 core
[ctr
] = curr
[ctr
]; /* saved for next log */
4303 if (curr
[ctr
] < prev
[ctr
])
4304 delta
[ctr
] = curr
[ctr
] + (~0U - prev
[ctr
]);
4306 delta
[ctr
] = (curr
[ctr
] - prev
[ctr
]);
4308 bytes
+= sprintf(p
+ bytes
, "%12u ", delta
[ctr
]);
4314 typedef union cm3_cnts
{ /* export this in bcm_buzzz.h */
4326 dhd_bcm_buzzz_dump_cntrs6(char *p
, uint32
*core
, uint32
*log
)
4330 uint32 cyccnt
, instrcnt
;
4331 cm3_cnts_t cm3_cnts
;
4334 { /* 32bit cyccnt */
4335 uint32 curr
, prev
, delta
;
4336 prev
= core
[0]; curr
= *log
++; core
[0] = curr
;
4338 delta
= curr
+ (~0U - prev
);
4340 delta
= (curr
- prev
);
4342 bytes
+= sprintf(p
+ bytes
, "%12u ", delta
);
4346 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
4349 cm3_cnts_t curr
, prev
, delta
;
4350 prev
.u32
= core
[1]; curr
.u32
= * log
++; core
[1] = curr
.u32
;
4351 for (i
= 0; i
< 4; i
++) {
4352 if (curr
.u8
[i
] < prev
.u8
[i
])
4353 delta
.u8
[i
] = curr
.u8
[i
] + (max8
- prev
.u8
[i
]);
4355 delta
.u8
[i
] = (curr
.u8
[i
] - prev
.u8
[i
]);
4356 bytes
+= sprintf(p
+ bytes
, "%4u ", delta
.u8
[i
]);
4358 cm3_cnts
.u32
= delta
.u32
;
4361 { /* Extract the foldcnt from arg0 */
4362 uint8 curr
, prev
, delta
, max8
= ~0;
4363 bcm_buzzz_arg0_t arg0
; arg0
.u32
= *log
;
4364 prev
= core
[2]; curr
= arg0
.klog
.cnt
; core
[2] = curr
;
4366 delta
= curr
+ (max8
- prev
);
4368 delta
= (curr
- prev
);
4369 bytes
+= sprintf(p
+ bytes
, "%4u ", delta
);
4373 instrcnt
= cyccnt
- (cm3_cnts
.u8
[0] + cm3_cnts
.u8
[1] + cm3_cnts
.u8
[2]
4374 + cm3_cnts
.u8
[3]) + foldcnt
;
4375 if (instrcnt
> 0xFFFFFF00)
4376 bytes
+= sprintf(p
+ bytes
, "[%10s] ", "~");
4378 bytes
+= sprintf(p
+ bytes
, "[%10u] ", instrcnt
);
4383 dhd_buzzz_dump_log(char *p
, uint32
*core
, uint32
*log
, bcm_buzzz_t
*buzzz
)
4386 bcm_buzzz_arg0_t arg0
;
4387 static uint8
* fmt
[] = BCM_BUZZZ_FMT_STRINGS
;
4389 if (buzzz
->counters
== 6) {
4390 bytes
+= dhd_bcm_buzzz_dump_cntrs6(p
, core
, log
);
4391 log
+= 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4393 bytes
+= dhd_buzzz_dump_cntrs(p
, core
, log
, buzzz
->counters
);
4394 log
+= buzzz
->counters
; /* (N x 32bit) CR4=3, CA7=4 */
4397 /* Dump the logged arguments using the registered formats */
4400 switch (arg0
.klog
.args
) {
4402 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
]);
4406 uint32 arg1
= *log
++;
4407 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
);
4413 arg1
= *log
++; arg2
= *log
++;
4414 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
);
4419 uint32 arg1
, arg2
, arg3
;
4420 arg1
= *log
++; arg2
= *log
++; arg3
= *log
++;
4421 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
, arg3
);
4426 uint32 arg1
, arg2
, arg3
, arg4
;
4427 arg1
= *log
++; arg2
= *log
++;
4428 arg3
= *log
++; arg4
= *log
++;
4429 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
, arg3
, arg4
);
4433 printf("Maximum one argument supported\n");
4437 bytes
+= sprintf(p
+ bytes
, "\n");
4442 void dhd_buzzz_dump(bcm_buzzz_t
*buzzz_p
, void *buffer_p
, char *p
)
4445 uint32 total
, part1
, part2
, log_sz
, core
[BCM_BUZZZ_COUNTERS_MAX
];
4448 for (i
= 0; i
< BCM_BUZZZ_COUNTERS_MAX
; i
++) {
4452 log_sz
= buzzz_p
->log_sz
;
4454 part1
= ((uint32
)buzzz_p
->cur
- (uint32
)buzzz_p
->log
) / log_sz
;
4456 if (buzzz_p
->wrap
== TRUE
) {
4457 part2
= ((uint32
)buzzz_p
->end
- (uint32
)buzzz_p
->cur
) / log_sz
;
4458 total
= (buzzz_p
->buffer_sz
- BCM_BUZZZ_LOGENTRY_MAXSZ
) / log_sz
;
4461 total
= buzzz_p
->count
;
4465 printf("bcm_buzzz_dump total<%u> done\n", total
);
4468 printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
4469 total
, part2
, part1
);
4472 if (part2
) { /* with wrap */
4473 log
= (void*)((size_t)buffer_p
+ (buzzz_p
->cur
- buzzz_p
->log
));
4474 while (part2
--) { /* from cur to end : part2 */
4476 dhd_buzzz_dump_log(p
, core
, (uint32
*)log
, buzzz_p
);
4478 log
= (void*)((size_t)log
+ buzzz_p
->log_sz
);
4482 log
= (void*)buffer_p
;
4485 dhd_buzzz_dump_log(p
, core
, (uint32
*)log
, buzzz_p
);
4487 log
= (void*)((size_t)log
+ buzzz_p
->log_sz
);
4490 printf("bcm_buzzz_dump done.\n");
4493 int dhd_buzzz_dump_dngl(dhd_bus_t
*bus
)
4495 bcm_buzzz_t
* buzzz_p
= NULL
;
4496 void * buffer_p
= NULL
;
4497 char * page_p
= NULL
;
4498 pciedev_shared_t
*sh
;
4501 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
4502 return BCME_UNSUPPORTED
;
4504 if ((page_p
= (char *)MALLOC(bus
->dhd
->osh
, 4096)) == NULL
) {
4505 printf("Page memory allocation failure\n");
4508 if ((buzzz_p
= MALLOC(bus
->dhd
->osh
, sizeof(bcm_buzzz_t
))) == NULL
) {
4509 printf("BCM BUZZZ memory allocation failure\n");
4513 ret
= dhdpcie_readshared(bus
);
4515 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__
));
4521 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__
, sh
->buzz_dbg_ptr
));
4523 if (sh
->buzz_dbg_ptr
!= 0U) { /* Fetch and display dongle BUZZZ Trace */
4525 dhdpcie_bus_membytes(bus
, FALSE
, (ulong
)sh
->buzz_dbg_ptr
,
4526 (uint8
*)buzzz_p
, sizeof(bcm_buzzz_t
));
4528 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
4529 "count<%u> status<%u> wrap<%u>\n"
4530 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
4531 (int)sh
->buzz_dbg_ptr
,
4532 (int)buzzz_p
->log
, (int)buzzz_p
->cur
, (int)buzzz_p
->end
,
4533 buzzz_p
->count
, buzzz_p
->status
, buzzz_p
->wrap
,
4534 buzzz_p
->cpu_idcode
, buzzz_p
->counters
, buzzz_p
->group
,
4535 buzzz_p
->buffer_sz
, buzzz_p
->log_sz
);
4537 if (buzzz_p
->count
== 0) {
4538 printf("Empty dongle BUZZZ trace\n\n");
4542 /* Allocate memory for trace buffer and format strings */
4543 buffer_p
= MALLOC(bus
->dhd
->osh
, buzzz_p
->buffer_sz
);
4544 if (buffer_p
== NULL
) {
4545 printf("Buffer memory allocation failure\n");
4549 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
4550 dhdpcie_bus_membytes(bus
, FALSE
, (uint32
)buzzz_p
->log
, /* Trace */
4551 (uint8
*)buffer_p
, buzzz_p
->buffer_sz
);
4553 /* Process and display the trace using formatted output */
4557 for (ctr
= 0; ctr
< buzzz_p
->counters
; ctr
++) {
4558 printf("<Evt[%02X]> ", buzzz_p
->eventid
[ctr
]);
4560 printf("<code execution point>\n");
4563 dhd_buzzz_dump(buzzz_p
, buffer_p
, page_p
);
4565 printf("----- End of dongle BCM BUZZZ Trace -----\n\n");
4567 MFREE(bus
->dhd
->osh
, buffer_p
, buzzz_p
->buffer_sz
); buffer_p
= NULL
;
4572 if (page_p
) MFREE(bus
->dhd
->osh
, page_p
, 4096);
4573 if (buzzz_p
) MFREE(bus
->dhd
->osh
, buzzz_p
, sizeof(bcm_buzzz_t
));
4574 if (buffer_p
) MFREE(bus
->dhd
->osh
, buffer_p
, buzzz_p
->buffer_sz
);
4578 #endif /* BCM_BUZZZ */
/* True when the backing bus is PCI and the core is a PCIe Gen2 core */
#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
	((sih)->buscoretype == PCIE2_CORE_ID))

#define PCIE_FLR_CAPAB_BIT		28
#define PCIE_FUNCTION_LEVEL_RESET_BIT	15

/* Change delays for only QT HW, FPGA and silicon uses same delay */
#ifdef BCMQT_HW
#define DHD_FUNCTION_LEVEL_RESET_DELAY	300000u
#define DHD_SSRESET_STATUS_RETRY_DELAY	10000u
#else
#define DHD_FUNCTION_LEVEL_RESET_DELAY	55u	/* 55 msec delay */
#define DHD_SSRESET_STATUS_RETRY_DELAY	40u
#endif
#define DHD_SSRESET_STATUS_RETRIES	50u
4597 dhd_bus_perform_flr(dhd_bus_t
*bus
, bool force_fail
)
4603 DHD_ERROR(("******** Perform FLR ********\n"));
4605 /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
4606 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CAPABILITY
, sizeof(val
));
4607 flr_capab
= val
& (1 << PCIE_FLR_CAPAB_BIT
);
4608 DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
4609 PCIE_CFG_DEVICE_CAPABILITY
, val
, flr_capab
));
4611 DHD_ERROR(("Chip does not support FLR\n"));
4612 return BCME_UNSUPPORTED
;
4615 /* Save pcie config space */
4616 DHD_ERROR(("Save Pcie Config Space\n"));
4617 DHD_PCIE_CONFIG_SAVE(bus
);
4619 /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
4620 DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4621 PCIE_FUNCTION_LEVEL_RESET_BIT
, PCIE_CFG_DEVICE_CONTROL
));
4622 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
));
4623 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4624 val
= val
| (1 << PCIE_FUNCTION_LEVEL_RESET_BIT
);
4625 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4626 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
), val
);
4628 /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
4629 DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY
));
4630 OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY
* 1000u);
4633 DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
4634 PCIE_SSRESET_DISABLE_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
4635 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
4636 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
4638 val
= val
| (1 << PCIE_SSRESET_DISABLE_BIT
);
4639 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
4641 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
), val
);
4643 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
4644 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
4648 /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
4649 DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4650 PCIE_FUNCTION_LEVEL_RESET_BIT
, PCIE_CFG_DEVICE_CONTROL
));
4651 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
));
4652 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4653 val
= val
& ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT
);
4654 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4655 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
), val
);
4657 /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
4658 DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
4659 "is cleared\n", PCIE_SSRESET_STATUS_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
4661 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
4662 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
4663 PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
4664 val
= val
& (1 << PCIE_SSRESET_STATUS_BIT
);
4665 OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY
);
4666 } while (val
&& (retry
++ < DHD_SSRESET_STATUS_RETRIES
));
4669 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4670 PCIE_CFG_SUBSYSTEM_CONTROL
, PCIE_SSRESET_STATUS_BIT
));
4671 /* User has to fire the IOVAR again, if force_fail is needed */
4673 bus
->flr_force_fail
= FALSE
;
4674 DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__
));
4679 /* Restore pcie config space */
4680 DHD_ERROR(("Restore Pcie Config Space\n"));
4681 DHD_PCIE_CONFIG_RESTORE(bus
);
4683 DHD_ERROR(("******** FLR Succedeed ********\n"));
#ifdef DHD_USE_BP_RESET
#define DHD_BP_RESET_ASPM_DISABLE_DELAY		500u	/* usec */

#define DHD_BP_RESET_STATUS_RETRY_DELAY		40u	/* usec */
#define DHD_BP_RESET_STATUS_RETRIES		50u

#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT	10
#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT	21

/*
 * Perform a backplane (BP) reset of the dongle via the PCIe config space.
 * Sequence: disable ASPM, set the SB-reset bit in SPROM_CTRL, poll until that
 * bit clears (reset asserted), then poll bit 21 of the DAR clock-control/status
 * core register until it clears, and finally re-enable ASPM L1.
 *
 * Returns BCME_OK on success, BCME_ERROR if either status poll times out.
 *
 * NOTE(review): reconstructed from garbled source text; loop/label skeleton
 * restored — verify against upstream dhd_pcie.c before merging.
 */
int
dhd_bus_perform_bp_reset(struct dhd_bus *bus)
{
	uint val;
	int retry = 0;
	uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
	int ret = BCME_OK;
	bool cond;

	DHD_ERROR(("******** Perform BP reset ********\n"));

	/* Disable ASPM */
	DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
		PCIECFGREG_LINK_STATUS_CTRL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	val = val & (~PCIE_ASPM_ENAB);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);

	/* wait for delay usec */
	DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
	OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);

	/* Set bit 10 of PCIECFGREG_SPROM_CTRL */
	DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
	val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);

	/* Wait till bit backplane reset is ASSERTED i,e
	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
	 * Only after this, poll for 21st bit of DAR reg 0xAE0 is valid
	 * else DAR register will read previous old value
	 */
	DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
		"PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
	do {
		val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
		DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
		cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));

	if (cond) {
		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
			PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
		ret = BCME_ERROR;
		goto aspm_enab;
	}

	/* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
	DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
		"dar_clk_ctrl_status_reg(0x%x) is cleared\n",
		PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
	retry = 0;
	do {
		val = si_corereg(bus->sih, bus->sih->buscoreidx,
			dar_clk_ctrl_status_reg, 0, 0);
		DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
			dar_clk_ctrl_status_reg, val));
		cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));

	if (cond) {
		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
			dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
		ret = BCME_ERROR;
	}

aspm_enab:
	/* Enable ASPM */
	DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
		PCIECFGREG_LINK_STATUS_CTRL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	val = val | (PCIE_ASPM_L1_ENAB);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);

	DHD_ERROR(("******** BP reset Succedeed ********\n"));

	return ret;
}
#endif /* DHD_USE_BP_RESET */
4787 dhd_bus_devreset(dhd_pub_t
*dhdp
, uint8 flag
)
4789 dhd_bus_t
*bus
= dhdp
->bus
;
4791 unsigned long flags
;
4792 unsigned long flags_bus
;
4793 #ifdef CONFIG_ARCH_MSM
4794 int retry
= POWERUP_MAX_RETRY
;
4795 #endif /* CONFIG_ARCH_MSM */
4797 if (flag
== TRUE
) { /* Turn off WLAN */
4798 /* Removing Power */
4799 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__
));
4801 bus
->dhd
->up
= FALSE
;
4803 /* wait for other contexts to finish -- if required a call
4804 * to OSL_DELAY for 1s can be added to give other contexts
4805 * a chance to finish
4807 dhdpcie_advertise_bus_cleanup(bus
->dhd
);
4809 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
4810 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4811 atomic_set(&bus
->dhd
->block_bus
, TRUE
);
4812 dhd_flush_rx_tx_wq(bus
->dhd
);
4813 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4815 #ifdef BCMPCIE_OOB_HOST_WAKE
4816 /* Clean up any pending host wake IRQ */
4817 dhd_bus_oob_intr_set(bus
->dhd
, FALSE
);
4818 dhd_bus_oob_intr_unregister(bus
->dhd
);
4819 #endif /* BCMPCIE_OOB_HOST_WAKE */
4820 dhd_os_wd_timer(dhdp
, 0);
4821 dhd_bus_stop(bus
, TRUE
);
4823 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
4824 dhdpcie_bus_intr_disable(bus
);
4825 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
4826 dhdpcie_free_irq(bus
);
4828 dhd_deinit_bus_lock(bus
);
4829 dhd_bus_release_dongle(bus
);
4830 dhdpcie_bus_free_resource(bus
);
4831 bcmerror
= dhdpcie_bus_disable_device(bus
);
4833 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4834 __FUNCTION__
, bcmerror
));
4835 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4836 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
4837 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4839 /* Clean up protocol data after Bus Master Enable bit clear
4840 * so that host can safely unmap DMA and remove the allocated buffers
4841 * from the PKTID MAP. Some Applicantion Processors supported
4842 * System MMU triggers Kernel panic when they detect to attempt to
4843 * DMA-unmapped memory access from the devices which use the
4844 * System MMU. Therefore, Kernel panic can be happened since it is
4845 * possible that dongle can access to DMA-unmapped memory after
4846 * calling the dhd_prot_reset().
4847 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4848 * should be located after the dhdpcie_bus_disable_device().
4850 dhd_prot_reset(dhdp
);
4852 #ifdef CONFIG_ARCH_MSM
4853 bcmerror
= dhdpcie_bus_clock_stop(bus
);
4855 DHD_ERROR(("%s: host clock stop failed: %d\n",
4856 __FUNCTION__
, bcmerror
));
4857 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4858 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
4859 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4862 #endif /* CONFIG_ARCH_MSM */
4863 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
4864 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
4865 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
4866 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4867 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
4868 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4871 dhdpcie_free_irq(bus
);
4873 #ifdef BCMPCIE_OOB_HOST_WAKE
4874 /* Clean up any pending host wake IRQ */
4875 dhd_bus_oob_intr_set(bus
->dhd
, FALSE
);
4876 dhd_bus_oob_intr_unregister(bus
->dhd
);
4877 #endif /* BCMPCIE_OOB_HOST_WAKE */
4878 dhd_dpc_kill(bus
->dhd
);
4879 if (!bus
->no_bus_init
) {
4880 dhd_bus_release_dongle(bus
);
4881 dhdpcie_bus_free_resource(bus
);
4882 bcmerror
= dhdpcie_bus_disable_device(bus
);
4884 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4885 __FUNCTION__
, bcmerror
));
4888 /* Clean up protocol data after Bus Master Enable bit clear
4889 * so that host can safely unmap DMA and remove the allocated
4890 * buffers from the PKTID MAP. Some Applicantion Processors
4891 * supported System MMU triggers Kernel panic when they detect
4892 * to attempt to DMA-unmapped memory access from the devices
4893 * which use the System MMU.
4894 * Therefore, Kernel panic can be happened since it is possible
4895 * that dongle can access to DMA-unmapped memory after calling
4896 * the dhd_prot_reset().
4897 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4898 * should be located after the dhdpcie_bus_disable_device().
4900 dhd_prot_reset(dhdp
);
4903 bus
->no_bus_init
= FALSE
;
4905 #ifdef CONFIG_ARCH_MSM
4906 bcmerror
= dhdpcie_bus_clock_stop(bus
);
4908 DHD_ERROR(("%s: host clock stop failed: %d\n",
4909 __FUNCTION__
, bcmerror
));
4912 #endif /* CONFIG_ARCH_MSM */
4915 bus
->dhd
->dongle_reset
= TRUE
;
4916 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__
));
4918 } else { /* Turn on WLAN */
4919 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
4921 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__
));
4922 #ifdef CONFIG_ARCH_MSM
4924 bcmerror
= dhdpcie_bus_clock_start(bus
);
4926 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
4934 if (bcmerror
&& !retry
) {
4935 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
4936 __FUNCTION__
, bcmerror
));
4939 #endif /* CONFIG_ARCH_MSM */
4940 bus
->is_linkdown
= 0;
4941 #ifdef SUPPORT_LINKDOWN_RECOVERY
4942 bus
->read_shm_fail
= FALSE
;
4943 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4944 bcmerror
= dhdpcie_bus_enable_device(bus
);
4946 DHD_ERROR(("%s: host configuration restore failed: %d\n",
4947 __FUNCTION__
, bcmerror
));
4951 bcmerror
= dhdpcie_bus_alloc_resource(bus
);
4953 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
4954 __FUNCTION__
, bcmerror
));
4958 bcmerror
= dhdpcie_bus_dongle_attach(bus
);
4960 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
4961 __FUNCTION__
, bcmerror
));
4965 bcmerror
= dhd_bus_request_irq(bus
);
4967 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
4968 __FUNCTION__
, bcmerror
));
4972 bus
->dhd
->dongle_reset
= FALSE
;
4974 bcmerror
= dhd_bus_start(dhdp
);
4976 DHD_ERROR(("%s: dhd_bus_start: %d\n",
4977 __FUNCTION__
, bcmerror
));
4981 bus
->dhd
->up
= TRUE
;
4982 /* Renabling watchdog which is disabled in dhdpcie_advertise_bus_cleanup */
4983 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
4984 DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
4986 dhd_os_wd_timer(bus
->dhd
, bus
->dhd
->dhd_watchdog_ms_backup
);
4988 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__
));
4990 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__
));
4997 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
4998 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
4999 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5005 dhdpcie_get_dma_ring_indices(dhd_pub_t
*dhd
)
5007 int h2d_support
, d2h_support
;
5009 d2h_support
= dhd
->dma_d2h_ring_upd_support
? 1 : 0;
5010 h2d_support
= dhd
->dma_h2d_ring_upd_support
? 1 : 0;
5011 return (d2h_support
| (h2d_support
<< 1));
5015 dhdpcie_set_dma_ring_indices(dhd_pub_t
*dhd
, int32 int_val
)
5018 /* Can change it only during initialization/FW download */
5019 if (dhd
->busstate
== DHD_BUS_DOWN
) {
5020 if ((int_val
> 3) || (int_val
< 0)) {
5021 DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
5022 bcmerror
= BCME_BADARG
;
5024 dhd
->dma_d2h_ring_upd_support
= (int_val
& 1) ? TRUE
: FALSE
;
5025 dhd
->dma_h2d_ring_upd_support
= (int_val
& 2) ? TRUE
: FALSE
;
5026 dhd
->dma_ring_upd_overwrite
= TRUE
;
5029 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5031 bcmerror
= BCME_NOTDOWN
;
5038 * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
5040 * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
5041 * @param params input buffer
5042 * @param plen length in [bytes] of input buffer 'params'
5043 * @param arg output buffer
5044 * @param len length in [bytes] of output buffer 'arg'
5047 dhdpcie_bus_doiovar(dhd_bus_t
*bus
, const bcm_iovar_t
*vi
, uint32 actionid
, const char *name
,
5048 void *params
, int plen
, void *arg
, int len
, int val_size
)
5056 DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
5057 __FUNCTION__
, actionid
, name
, params
, plen
, arg
, len
, val_size
));
5059 if ((bcmerror
= bcm_iovar_lencheck(vi
, arg
, len
, IOV_ISSET(actionid
))) != 0)
5062 if (plen
>= (int)sizeof(int_val
))
5063 bcopy(params
, &int_val
, sizeof(int_val
));
5065 if (plen
>= (int)sizeof(int_val
) * 2)
5066 bcopy((void*)((uintptr
)params
+ sizeof(int_val
)), &int_val2
, sizeof(int_val2
));
5068 if (plen
>= (int)sizeof(int_val
) * 3)
5069 bcopy((void*)((uintptr
)params
+ 2 * sizeof(int_val
)), &int_val3
, sizeof(int_val3
));
5071 bool_val
= (int_val
!= 0) ? TRUE
: FALSE
;
5073 /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
5074 if (bus
->dhd
->dongle_reset
&& !(actionid
== IOV_SVAL(IOV_DEVRESET
) ||
5075 actionid
== IOV_GVAL(IOV_DEVRESET
))) {
5076 bcmerror
= BCME_NOTREADY
;
5082 case IOV_SVAL(IOV_VARS
):
5083 bcmerror
= dhdpcie_downloadvars(bus
, arg
, len
);
5085 case IOV_SVAL(IOV_PCIE_LPBK
):
5086 bcmerror
= dhdpcie_bus_lpback_req(bus
, int_val
);
5089 case IOV_SVAL(IOV_PCIE_DMAXFER
): {
5093 if (plen
>= (int)sizeof(int_val
) * 4) {
5094 bcopy((void*)((uintptr
)params
+ 3 * sizeof(int_val
)),
5095 &int_val4
, sizeof(int_val4
));
5097 if (plen
>= (int)sizeof(int_val
) * 5) {
5098 bcopy((void*)((uintptr
)params
+ 4 * sizeof(int_val
)),
5099 &wait
, sizeof(wait
));
5101 if (plen
>= (int)sizeof(core_num
) * 6) {
5102 bcopy((void*)((uintptr
)params
+ 5 * sizeof(core_num
)),
5103 &core_num
, sizeof(core_num
));
5105 bcmerror
= dhdpcie_bus_dmaxfer_req(bus
, int_val
, int_val2
, int_val3
,
5106 int_val4
, core_num
, wait
);
5107 if (wait
&& bcmerror
>= 0) {
5108 /* get the status of the dma transfer */
5109 int_val4
= dhdmsgbuf_dmaxfer_status(bus
->dhd
);
5110 bcopy(&int_val4
, params
, sizeof(int_val
));
5115 case IOV_GVAL(IOV_PCIE_DMAXFER
): {
5117 dma_status
= dhdmsgbuf_dmaxfer_status(bus
->dhd
);
5118 bcopy(&dma_status
, arg
, val_size
);
5123 case IOV_GVAL(IOV_PCIE_SUSPEND
):
5124 int_val
= (bus
->dhd
->busstate
== DHD_BUS_SUSPEND
) ? 1 : 0;
5125 bcopy(&int_val
, arg
, val_size
);
5128 case IOV_SVAL(IOV_PCIE_SUSPEND
):
5129 if (bool_val
) { /* Suspend */
5131 unsigned long flags
;
5134 * If some other context is busy, wait until they are done,
5135 * before starting suspend
5137 ret
= dhd_os_busbusy_wait_condition(bus
->dhd
,
5138 &bus
->dhd
->dhd_bus_busy_state
, DHD_BUS_BUSY_IN_DHD_IOVAR
);
5140 DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
5141 __FUNCTION__
, bus
->dhd
->dhd_bus_busy_state
));
5145 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5146 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus
->dhd
);
5147 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5148 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5149 dhdpcie_bus_suspend(bus
, TRUE
, TRUE
);
5151 dhdpcie_bus_suspend(bus
, TRUE
);
5152 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5154 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5155 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus
->dhd
);
5156 dhd_os_busbusy_wake(bus
->dhd
);
5157 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5158 } else { /* Resume */
5159 unsigned long flags
;
5160 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5161 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus
->dhd
);
5162 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5164 dhdpcie_bus_suspend(bus
, FALSE
);
5166 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5167 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus
->dhd
);
5168 dhd_os_busbusy_wake(bus
->dhd
);
5169 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5173 case IOV_GVAL(IOV_MEMSIZE
):
5174 int_val
= (int32
)bus
->ramsize
;
5175 bcopy(&int_val
, arg
, val_size
);
5178 /* Debug related. Dumps core registers or one of the dongle memory */
5179 case IOV_GVAL(IOV_DUMP_DONGLE
):
5181 dump_dongle_in_t ddi
= *(dump_dongle_in_t
*)params
;
5182 dump_dongle_out_t
*ddo
= (dump_dongle_out_t
*)arg
;
5183 uint32
*p
= ddo
->val
;
5184 const uint max_offset
= 4096 - 1; /* one core contains max 4096/4 registers */
5186 if (plen
< sizeof(ddi
) || len
< sizeof(ddo
)) {
5187 bcmerror
= BCME_BADARG
;
5192 case DUMP_DONGLE_COREREG
:
5195 if (si_setcoreidx(bus
->sih
, ddi
.index
) == NULL
) {
5196 break; // beyond last core: core enumeration ended
5199 ddo
->address
= si_addrspace(bus
->sih
, CORE_SLAVE_PORT_0
, CORE_BASE_ADDR_0
);
5200 ddo
->address
+= ddi
.offset
; // BP address at which this dump starts
5202 ddo
->id
= si_coreid(bus
->sih
);
5203 ddo
->rev
= si_corerev(bus
->sih
);
5205 while (ddi
.offset
< max_offset
&&
5206 sizeof(dump_dongle_out_t
) + ddo
->n_bytes
< (uint
)len
) {
5207 *p
++ = si_corereg(bus
->sih
, ddi
.index
, ddi
.offset
, 0, 0);
5208 ddi
.offset
+= sizeof(uint32
);
5209 ddo
->n_bytes
+= sizeof(uint32
);
5213 // TODO: implement d11 SHM/TPL dumping
5214 bcmerror
= BCME_BADARG
;
5220 /* Debug related. Returns a string with dongle capabilities */
5221 case IOV_GVAL(IOV_DNGL_CAPS
):
5223 strncpy(arg
, bus
->dhd
->fw_capabilities
,
5224 MIN(strlen(bus
->dhd
->fw_capabilities
), (size_t)len
));
5225 ((char*)arg
)[len
- 1] = '\0';
5229 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
5230 case IOV_SVAL(IOV_GDB_SERVER
):
5231 /* debugger_*() functions may sleep, so cannot hold spinlock */
5232 DHD_PERIM_UNLOCK(bus
->dhd
);
5234 debugger_init((void *) bus
, &bus_ops
, int_val
, SI_ENUM_BASE(bus
->sih
));
5238 DHD_PERIM_LOCK(bus
->dhd
);
5240 #endif /* DEBUGGER || DHD_DSCOPE */
5243 /* Dump dongle side buzzz trace to console */
5244 case IOV_GVAL(IOV_BUZZZ_DUMP
):
5245 bcmerror
= dhd_buzzz_dump_dngl(bus
);
5247 #endif /* BCM_BUZZZ */
5249 case IOV_SVAL(IOV_SET_DOWNLOAD_STATE
):
5250 bcmerror
= dhdpcie_bus_download_state(bus
, bool_val
);
5253 case IOV_GVAL(IOV_RAMSIZE
):
5254 int_val
= (int32
)bus
->ramsize
;
5255 bcopy(&int_val
, arg
, val_size
);
5258 case IOV_SVAL(IOV_RAMSIZE
):
5259 bus
->ramsize
= int_val
;
5260 bus
->orig_ramsize
= int_val
;
5263 case IOV_GVAL(IOV_RAMSTART
):
5264 int_val
= (int32
)bus
->dongle_ram_base
;
5265 bcopy(&int_val
, arg
, val_size
);
5268 case IOV_GVAL(IOV_CC_NVMSHADOW
):
5270 struct bcmstrbuf dump_b
;
5272 bcm_binit(&dump_b
, arg
, len
);
5273 bcmerror
= dhdpcie_cc_nvmshadow(bus
, &dump_b
);
5277 case IOV_GVAL(IOV_SLEEP_ALLOWED
):
5278 bool_val
= bus
->sleep_allowed
;
5279 bcopy(&bool_val
, arg
, val_size
);
5282 case IOV_SVAL(IOV_SLEEP_ALLOWED
):
5283 bus
->sleep_allowed
= bool_val
;
5286 case IOV_GVAL(IOV_DONGLEISOLATION
):
5287 int_val
= bus
->dhd
->dongle_isolation
;
5288 bcopy(&int_val
, arg
, val_size
);
5291 case IOV_SVAL(IOV_DONGLEISOLATION
):
5292 bus
->dhd
->dongle_isolation
= bool_val
;
5295 case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD
):
5296 int_val
= bus
->ltrsleep_on_unload
;
5297 bcopy(&int_val
, arg
, val_size
);
5300 case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD
):
5301 bus
->ltrsleep_on_unload
= bool_val
;
5304 case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK
):
5306 struct bcmstrbuf dump_b
;
5307 bcm_binit(&dump_b
, arg
, len
);
5308 bcmerror
= dhd_prot_ringupd_dump(bus
->dhd
, &dump_b
);
5311 case IOV_GVAL(IOV_DMA_RINGINDICES
):
5313 int_val
= dhdpcie_get_dma_ring_indices(bus
->dhd
);
5314 bcopy(&int_val
, arg
, sizeof(int_val
));
5317 case IOV_SVAL(IOV_DMA_RINGINDICES
):
5318 bcmerror
= dhdpcie_set_dma_ring_indices(bus
->dhd
, int_val
);
5321 case IOV_GVAL(IOV_METADATA_DBG
):
5322 int_val
= dhd_prot_metadata_dbg_get(bus
->dhd
);
5323 bcopy(&int_val
, arg
, val_size
);
5325 case IOV_SVAL(IOV_METADATA_DBG
):
5326 dhd_prot_metadata_dbg_set(bus
->dhd
, (int_val
!= 0));
5329 case IOV_GVAL(IOV_RX_METADATALEN
):
5330 int_val
= dhd_prot_metadatalen_get(bus
->dhd
, TRUE
);
5331 bcopy(&int_val
, arg
, val_size
);
5334 case IOV_SVAL(IOV_RX_METADATALEN
):
5336 bcmerror
= BCME_BUFTOOLONG
;
5339 dhd_prot_metadatalen_set(bus
->dhd
, int_val
, TRUE
);
5342 case IOV_SVAL(IOV_TXP_THRESHOLD
):
5343 dhd_prot_txp_threshold(bus
->dhd
, TRUE
, int_val
);
5346 case IOV_GVAL(IOV_TXP_THRESHOLD
):
5347 int_val
= dhd_prot_txp_threshold(bus
->dhd
, FALSE
, int_val
);
5348 bcopy(&int_val
, arg
, val_size
);
5351 case IOV_SVAL(IOV_DB1_FOR_MB
):
5353 bus
->db1_for_mb
= TRUE
;
5355 bus
->db1_for_mb
= FALSE
;
5358 case IOV_GVAL(IOV_DB1_FOR_MB
):
5359 if (bus
->db1_for_mb
)
5363 bcopy(&int_val
, arg
, val_size
);
5366 case IOV_GVAL(IOV_TX_METADATALEN
):
5367 int_val
= dhd_prot_metadatalen_get(bus
->dhd
, FALSE
);
5368 bcopy(&int_val
, arg
, val_size
);
5371 case IOV_SVAL(IOV_TX_METADATALEN
):
5373 bcmerror
= BCME_BUFTOOLONG
;
5376 dhd_prot_metadatalen_set(bus
->dhd
, int_val
, FALSE
);
5379 case IOV_SVAL(IOV_DEVRESET
):
5381 case DHD_BUS_DEVRESET_ON
:
5382 bcmerror
= dhd_bus_devreset(bus
->dhd
, (uint8
)int_val
);
5384 case DHD_BUS_DEVRESET_OFF
:
5385 bcmerror
= dhd_bus_devreset(bus
->dhd
, (uint8
)int_val
);
5387 case DHD_BUS_DEVRESET_FLR
:
5388 bcmerror
= dhd_bus_perform_flr(bus
, bus
->flr_force_fail
);
5390 case DHD_BUS_DEVRESET_FLR_FORCE_FAIL
:
5391 bus
->flr_force_fail
= TRUE
;
5394 DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__
));
5398 case IOV_SVAL(IOV_FORCE_FW_TRAP
):
5399 if (bus
->dhd
->busstate
== DHD_BUS_DATA
)
5400 dhdpcie_fw_trap(bus
);
5402 DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__
));
5403 bcmerror
= BCME_NOTUP
;
5406 case IOV_GVAL(IOV_FLOW_PRIO_MAP
):
5407 int_val
= bus
->dhd
->flow_prio_map_type
;
5408 bcopy(&int_val
, arg
, val_size
);
5411 case IOV_SVAL(IOV_FLOW_PRIO_MAP
):
5412 int_val
= (int32
)dhd_update_flow_prio_map(bus
->dhd
, (uint8
)int_val
);
5413 bcopy(&int_val
, arg
, val_size
);
5416 #ifdef DHD_PCIE_RUNTIMEPM
5417 case IOV_GVAL(IOV_IDLETIME
):
5418 int_val
= bus
->idletime
;
5419 bcopy(&int_val
, arg
, val_size
);
5422 case IOV_SVAL(IOV_IDLETIME
):
5424 bcmerror
= BCME_BADARG
;
5426 bus
->idletime
= int_val
;
5427 if (bus
->idletime
) {
5428 DHD_ENABLE_RUNTIME_PM(bus
->dhd
);
5430 DHD_DISABLE_RUNTIME_PM(bus
->dhd
);
5434 #endif /* DHD_PCIE_RUNTIMEPM */
5436 case IOV_GVAL(IOV_TXBOUND
):
5437 int_val
= (int32
)dhd_txbound
;
5438 bcopy(&int_val
, arg
, val_size
);
5441 case IOV_SVAL(IOV_TXBOUND
):
5442 dhd_txbound
= (uint
)int_val
;
5445 case IOV_SVAL(IOV_H2D_MAILBOXDATA
):
5446 dhdpcie_send_mb_data(bus
, (uint
)int_val
);
5449 case IOV_SVAL(IOV_INFORINGS
):
5450 dhd_prot_init_info_rings(bus
->dhd
);
5453 case IOV_SVAL(IOV_H2D_PHASE
):
5454 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
5455 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5457 bcmerror
= BCME_NOTDOWN
;
5461 bus
->dhd
->h2d_phase_supported
= TRUE
;
5463 bus
->dhd
->h2d_phase_supported
= FALSE
;
5466 case IOV_GVAL(IOV_H2D_PHASE
):
5467 int_val
= (int32
) bus
->dhd
->h2d_phase_supported
;
5468 bcopy(&int_val
, arg
, val_size
);
5471 case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE
):
5472 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
5473 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5475 bcmerror
= BCME_NOTDOWN
;
5479 bus
->dhd
->force_dongletrap_on_bad_h2d_phase
= TRUE
;
5481 bus
->dhd
->force_dongletrap_on_bad_h2d_phase
= FALSE
;
5484 case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE
):
5485 int_val
= (int32
) bus
->dhd
->force_dongletrap_on_bad_h2d_phase
;
5486 bcopy(&int_val
, arg
, val_size
);
5489 case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM
):
5490 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
5491 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5493 bcmerror
= BCME_NOTDOWN
;
5496 dhd_prot_set_h2d_max_txpost(bus
->dhd
, (uint16
)int_val
);
5499 case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM
):
5500 int_val
= dhd_prot_get_h2d_max_txpost(bus
->dhd
);
5501 bcopy(&int_val
, arg
, val_size
);
5504 case IOV_GVAL(IOV_RXBOUND
):
5505 int_val
= (int32
)dhd_rxbound
;
5506 bcopy(&int_val
, arg
, val_size
);
5509 case IOV_SVAL(IOV_RXBOUND
):
5510 dhd_rxbound
= (uint
)int_val
;
5513 case IOV_GVAL(IOV_TRAPDATA
):
5515 struct bcmstrbuf dump_b
;
5516 bcm_binit(&dump_b
, arg
, len
);
5517 bcmerror
= dhd_prot_dump_extended_trap(bus
->dhd
, &dump_b
, FALSE
);
5521 case IOV_GVAL(IOV_TRAPDATA_RAW
):
5523 struct bcmstrbuf dump_b
;
5524 bcm_binit(&dump_b
, arg
, len
);
5525 bcmerror
= dhd_prot_dump_extended_trap(bus
->dhd
, &dump_b
, TRUE
);
5528 case IOV_SVAL(IOV_HANGREPORT
):
5529 bus
->dhd
->hang_report
= bool_val
;
5530 DHD_ERROR(("%s: Set hang_report as %d\n",
5531 __FUNCTION__
, bus
->dhd
->hang_report
));
5534 case IOV_GVAL(IOV_HANGREPORT
):
5535 int_val
= (int32
)bus
->dhd
->hang_report
;
5536 bcopy(&int_val
, arg
, val_size
);
5539 case IOV_SVAL(IOV_CTO_PREVENTION
):
5543 if (bus
->sih
->buscorerev
< 19) {
5544 bcmerror
= BCME_UNSUPPORTED
;
5547 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5548 OFFSETOF(sbpcieregs_t
, configaddr
), ~0, PCI_LINK_STATUS
);
5550 pcie_lnkst
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5551 OFFSETOF(sbpcieregs_t
, configdata
), 0, 0);
5553 if ((bus
->sih
->buscorerev
== 19) &&
5554 (((pcie_lnkst
>> PCI_LINK_SPEED_SHIFT
) &
5555 PCI_LINK_SPEED_MASK
) == PCIE_LNK_SPEED_GEN1
)) {
5556 bcmerror
= BCME_UNSUPPORTED
;
5559 bus
->cto_enable
= bool_val
;
5560 dhdpcie_cto_init(bus
, bus
->cto_enable
);
5561 DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
5562 __FUNCTION__
, bus
->cto_enable
));
5566 case IOV_GVAL(IOV_CTO_PREVENTION
):
5567 if (bus
->sih
->buscorerev
< 19) {
5568 bcmerror
= BCME_UNSUPPORTED
;
5571 int_val
= (int32
)bus
->cto_enable
;
5572 bcopy(&int_val
, arg
, val_size
);
5575 case IOV_SVAL(IOV_CTO_THRESHOLD
):
5577 if (bus
->sih
->buscorerev
< 19) {
5578 bcmerror
= BCME_UNSUPPORTED
;
5581 bus
->cto_threshold
= (uint32
)int_val
;
5585 case IOV_GVAL(IOV_CTO_THRESHOLD
):
5586 if (bus
->sih
->buscorerev
< 19) {
5587 bcmerror
= BCME_UNSUPPORTED
;
5590 if (bus
->cto_threshold
)
5591 int_val
= (int32
)bus
->cto_threshold
;
5593 int_val
= (int32
)PCIE_CTO_TO_THRESH_DEFAULT
;
5595 bcopy(&int_val
, arg
, val_size
);
5598 case IOV_SVAL(IOV_PCIE_WD_RESET
):
5600 uint32 wd_en
= (bus
->sih
->buscorerev
== 66) ? WD_SSRESET_PCIE_F0_EN
:
5601 (WD_SSRESET_PCIE_F0_EN
| WD_SSRESET_PCIE_ALL_FN_EN
);
5602 pcie_watchdog_reset(bus
->osh
, bus
->sih
,
5603 WD_ENABLE_MASK
, wd_en
);
5607 case IOV_GVAL(IOV_IDMA_ENABLE
):
5608 int_val
= bus
->idma_enabled
;
5609 bcopy(&int_val
, arg
, val_size
);
5611 case IOV_SVAL(IOV_IDMA_ENABLE
):
5612 bus
->idma_enabled
= (bool)int_val
;
5614 case IOV_GVAL(IOV_IFRM_ENABLE
):
5615 int_val
= bus
->ifrm_enabled
;
5616 bcopy(&int_val
, arg
, val_size
);
5618 case IOV_SVAL(IOV_IFRM_ENABLE
):
5619 bus
->ifrm_enabled
= (bool)int_val
;
5621 case IOV_GVAL(IOV_CLEAR_RING
):
5622 bcopy(&int_val
, arg
, val_size
);
5623 dhd_flow_rings_flush(bus
->dhd
, 0);
5625 case IOV_GVAL(IOV_DAR_ENABLE
):
5626 int_val
= bus
->dar_enabled
;
5627 bcopy(&int_val
, arg
, val_size
);
5629 case IOV_SVAL(IOV_DAR_ENABLE
):
5630 bus
->dar_enabled
= (bool)int_val
;
5633 case IOV_GVAL(IOV_MINIDUMP_OVERRIDE
):
5634 int_val
= bus
->d2h_minidump_override
;
5635 bcopy(&int_val
, arg
, val_size
);
5637 case IOV_SVAL(IOV_MINIDUMP_OVERRIDE
):
5638 /* Can change it only before FW download */
5639 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
5640 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5642 bcmerror
= BCME_NOTDOWN
;
5645 bus
->d2h_minidump_override
= (bool)int_val
;
5647 #endif /* D2H_MINIDUMP */
5649 bcmerror
= BCME_UNSUPPORTED
;
5655 } /* dhdpcie_bus_doiovar */
5657 /** Transfers bytes from host to dongle using pio mode */
5659 dhdpcie_bus_lpback_req(struct dhd_bus
*bus
, uint32 len
)
5661 if (bus
->dhd
== NULL
) {
5662 DHD_ERROR(("bus not inited\n"));
5665 if (bus
->dhd
->prot
== NULL
) {
5666 DHD_ERROR(("prot is not inited\n"));
5669 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
5670 DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
5673 dhdmsgbuf_lpbk_req(bus
->dhd
, len
);
5677 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
5679 dhd_bus_hostready(struct dhd_bus
*bus
)
5681 if (!bus
->dhd
->d2h_hostrdy_supported
) {
5685 if (bus
->is_linkdown
) {
5686 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
5690 DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__
,
5691 dhd_pcie_config_read(bus
->osh
, PCI_CFG_CMD
, sizeof(uint32
))));
5692 if (DAR_PWRREQ(bus
)) {
5693 dhd_bus_pcie_pwr_req(bus
);
5695 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db1_addr_get(bus
), ~0, 0x12345678);
5696 bus
->hostready_count
++;
5697 DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__
, bus
->hostready_count
));
5700 /* Clear INTSTATUS */
5702 dhdpcie_bus_clear_intstatus(struct dhd_bus
*bus
)
5704 uint32 intstatus
= 0;
5705 if ((bus
->sih
->buscorerev
== 6) || (bus
->sih
->buscorerev
== 4) ||
5706 (bus
->sih
->buscorerev
== 2)) {
5707 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
5708 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, intstatus
);
5710 /* this is a PCIE core register..not a config register... */
5711 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, 0, 0);
5712 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, bus
->def_intmask
,
/*
 * dhdpcie_bus_suspend() - suspend (state == TRUE) or resume (state == FALSE)
 * the PCIe bus / dongle link.
 *
 * Suspend path: stops the network queues, sends H2D_HOST_D3_INFORM to the
 * dongle, waits for the D3 ACK mailbox response, and only then lets the host
 * controller suspend via dhdpcie_pci_suspend_resume().  If a wakelock is
 * still held or the D3 ACK times out, the bus is rolled back to DHD_BUS_DATA
 * and interrupts/queues are restored.
 * Resume path: invalidates the BAR0 second window, resumes the host
 * controller, informs the dongle via D0_INFORM/hostready and re-enables
 * dongle and host interrupts.
 *
 * NOTE(review): several original lines were elided in this extraction
 * (local declarations such as rc/timeleft/active/idle_retry, early
 * "return ..." statements, some closing braces and #else directives);
 * the surviving code tokens are kept unchanged below.
 */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
	unsigned long flags, flags_bus;
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	int d3_read_retry = 0;
	uint32 d2h_mb_data = 0;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	/* Sanity: bus and protocol layer must be fully initialised. */
	if (bus->dhd == NULL) {
		DHD_ERROR(("bus not inited\n"));
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
	/* Bail out early if a bus error (trap/timeout) is already pending. */
	if (dhd_query_bus_erros(bus->dhd)) {

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
		DHD_ERROR(("not in a readystate\n"));
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
	if (bus->dhd->dongle_reset) {
		DHD_ERROR(("Dongle is in reset state.\n"));

	/* Check whether we are already in the requested state.
	 * state=TRUE means Suspend
	 * state=FALSE means Resume
	 */
	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
		DHD_ERROR(("Bus is already in RESUME state.\n"));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down, state=%d\n",
			__FUNCTION__, state));

		/* ---- Suspend path ---- */
		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));

		/* Park the watchdog while suspended; restored on resume/rollback. */
		bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
		if (bus->dhd->dhd_watchdog_ms_backup) {
			DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
			dhd_os_wd_timer(bus->dhd, 0);

		DHD_GENERAL_LOCK(bus->dhd, flags);
		/* A transmit still in flight aborts the suspend attempt. */
		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
			DHD_ERROR(("Tx Request is not ended\n"));
			bus->dhd->busstate = DHD_BUS_DATA;
			DHD_GENERAL_UNLOCK(bus->dhd, flags);

		bus->last_suspend_start_time = OSL_LOCALTIME_NS();

		/* stop all interface network queue. */
		dhd_bus_stop_queue(bus);
		DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
		/* Clear wait_for_d3_ack before sending D3_INFORM */
		bus->wait_for_d3_ack = 0;
		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
		/* Clear wait_for_d3_ack before sending D3_INFORM */
		bus->wait_for_d3_ack = 0;
		/* ACK_NOINT: poll the mailbox instead of waiting for an interrupt. */
		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
		while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
			dhdpcie_handle_mb_data(bus);
			usleep_range(1000, 1500);
		/* NOTE(review): the #else (non-runtime-PM) branch marker was elided here. */
		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);

		/* Clear wait_for_d3_ack before sending D3_INFORM */
		bus->wait_for_d3_ack = 0;
		/*
		 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
		 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
		 * inside atomic context, so that no more DBs will be
		 * rung after sending D3_INFORM
		 */
		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);

		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);

#ifdef DHD_RECOVER_TIMEOUT
		if (bus->wait_for_d3_ack == 0) {
			/* If wait_for_d3_ack was not updated because D2H MB was not received */
			uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
				bus->pcie_mailbox_int, 0, 0);
			int host_irq_disabled = dhdpcie_irq_disabled(bus);
			/* Retry only when the link looks alive (intstatus readable and
			 * not all-ones) and no other bus error is pending.
			 */
			if ((intstatus) && (intstatus != (uint32)-1) &&
				(timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
					" host_irq_disabled=%d\n",
					__FUNCTION__, intstatus, host_irq_disabled));
				dhd_pcie_intr_count_dump(bus->dhd);
				dhd_print_tasklet_status(bus->dhd);
				if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
					!bus->use_mailbox) {
					dhd_prot_process_ctrlbuf(bus->dhd);
				dhdpcie_handle_mb_data(bus);
				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
				/* Clear Interrupts */
				dhdpcie_bus_clear_intstatus(bus);
		} /* bus->wait_for_d3_ack was 0 */
#endif /* DHD_RECOVER_TIMEOUT */
		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

		/* To allow threads that got pre-empted to complete.
		 */
		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
			(idle_retry < MAX_WKLK_IDLE_CHECK)) {

		if (bus->wait_for_d3_ack) {
			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
			/* Got D3 Ack. Suspend the bus */
			/* NOTE(review): the "if (active)" wakelock-held test framing this
			 * rollback branch was elided in this extraction.
			 */
				DHD_ERROR(("%s():Suspend failed because of wakelock"
					"restoring Dongle to D0\n", __FUNCTION__));

				if (bus->dhd->dhd_watchdog_ms_backup) {
					DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
					dhd_os_wd_timer(bus->dhd,
						bus->dhd->dhd_watchdog_ms_backup);

				/*
				 * Dongle still thinks that it has to be in D3 state until
				 * it gets a D0 Inform, but we are backing off from suspend.
				 * Ensure that Dongle is brought back to D0.
				 *
				 * Bringing back Dongle from D3 Ack state to D0 state is a
				 * 2 step process. Dongle would want to know that D0 Inform
				 * would be sent as a MB interrupt to bring it out of D3 Ack
				 * state to D0 state. So we have to send both this message.
				 */

				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
				bus->wait_for_d3_ack = 0;

				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
				bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
				/* Enable back the intmask which was cleared in DPC
				 * after getting D3_ACK.
				 */
				bus->resume_intr_enable_count++;

				/* For Linux, Macos etc (otherthan NDIS) enable back the dongle
				 * interrupts using intmask and host interrupts
				 * which were disabled in the dhdpcie_bus_isr()->
				 * dhd_bus_handle_d3_ack().
				 */
				/* Enable back interrupt using Intmask!! */
				dhdpcie_bus_intr_enable(bus);
				/* Enable back interrupt from Host side!! */
				dhdpcie_enable_irq(bus);

				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

				if (bus->use_d0_inform) {
					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
					dhdpcie_send_mb_data(bus,
						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
				/* ring doorbell 1 (hostready) */
				dhd_bus_hostready(bus);

				DHD_GENERAL_LOCK(bus->dhd, flags);
				bus->dhd->busstate = DHD_BUS_DATA;
				/* resume all interface network queue. */
				dhd_bus_start_queue(bus);
				DHD_GENERAL_UNLOCK(bus->dhd, flags);

			/* Actual Suspend after no wakelock */
			/* At this time bus->bus_low_power_state will be
			 * made to DHD_BUS_D3_ACK_RECIEVED after recieving D3_ACK
			 * in dhd_bus_handle_d3_ack()
			 */
				if (bus->use_d0_inform &&
					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);

#if defined(BCMPCIE_OOB_HOST_WAKE)
				/* Arm the out-of-band host-wake interrupt for the sleep period. */
				dhdpcie_oob_intr_set(bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */

				DHD_GENERAL_LOCK(bus->dhd, flags);
				/* The Host cannot process interrupts now so disable the same.
				 * No need to disable the dongle INTR using intmask, as we are
				 * already calling disabling INTRs from DPC context after
				 * getting D3_ACK in dhd_bus_handle_d3_ack.
				 * Code may not look symmetric between Suspend and
				 * Resume paths but this is done to close down the timing window
				 * between DPC and suspend context and bus->bus_low_power_state
				 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
				 */
				bus->dhd->d3ackcnt_timeout = 0;
				bus->dhd->busstate = DHD_BUS_SUSPEND;
				DHD_GENERAL_UNLOCK(bus->dhd, flags);
				DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
					"BaseAddress1(0x%x)=0x%x\n", __FUNCTION__,
					PCIECFGREG_BASEADDR0,
					dhd_pcie_config_read(bus->osh,
						PCIECFGREG_BASEADDR0, sizeof(uint32)),
					PCIECFGREG_BASEADDR1,
					dhd_pcie_config_read(bus->osh,
						PCIECFGREG_BASEADDR1, sizeof(uint32))));
				dhdpcie_dump_resource(bus);
				/* Handle Host Suspend */
				rc = dhdpcie_pci_suspend_resume(bus, state);
					bus->last_suspend_end_time = OSL_LOCALTIME_NS();
		} else if (timeleft == 0) { /* D3 ACK Timeout */
#ifdef DHD_FW_COREDUMP
			uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
#endif /* DHD_FW_COREDUMP */

			/* check if the D3 ACK timeout due to scheduling issue */
			bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
				bus->isr_entry_time > bus->last_d3_inform_time &&
				dhd_bus_query_dpc_sched_errors(bus->dhd);
			bus->dhd->d3ack_timeout_occured = TRUE;
			/* If the D3 Ack has timeout */
			bus->dhd->d3ackcnt_timeout++;
			DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
				__FUNCTION__, bus->dhd->is_sched_error ?
				" due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
			if (bus->dhd->is_sched_error && cur_memdump_mode) {
				/* change g_assert_type to trigger Kernel panic */
				/* use ASSERT() to trigger panic */
#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */

			/* Roll the bus back to DATA state so callers can recover. */
			DHD_BUS_LOCK(bus->bus_lock, flags_bus);
			bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
			DHD_GENERAL_LOCK(bus->dhd, flags);
			bus->dhd->busstate = DHD_BUS_DATA;
			/* resume all interface network queue. */
			dhd_bus_start_queue(bus);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
			if (!bus->dhd->dongle_trap_occured &&
				!bus->is_linkdown) {
				uint32 intstatus = 0;

				/* Check if PCIe bus status is valid */
				intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
					bus->pcie_mailbox_int, 0, 0);
				if (intstatus == (uint32)-1) {
					/* Invalidate PCIe bus status */
					bus->is_linkdown = 1;

				dhd_bus_dump_console_buffer(bus);
				dhd_prot_debug_info_print(bus->dhd);
#ifdef DHD_FW_COREDUMP
				if (cur_memdump_mode) {
					/* write core dump to file */
					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
					dhdpcie_mem_dump(bus);
#endif /* DHD_FW_COREDUMP */

				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
				bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
#if defined(DHD_ERPOM)
			dhd_schedule_reset(bus->dhd);

		/* ---- Resume path ---- */
		DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
		bus->last_resume_start_time = OSL_LOCALTIME_NS();

		/*
		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
		 * si_backplane_access(function to read/write backplane)
		 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
		 * window being accessed is different form the window
		 * being pointed by second_bar0win.
		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
		 * invalidating second_bar0win after resume updates
		 * PCIE2_BAR0_CORE2_WIN with right window.
		 */
		si_invalidate_second_bar0win(bus->sih);
#if defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
		rc = dhdpcie_pci_suspend_resume(bus, state);
		DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n",
			__FUNCTION__, PCIECFGREG_BASEADDR0,
			dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
			PCIECFGREG_BASEADDR1,
			dhd_pcie_config_read(bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32))));
		dhdpcie_dump_resource(bus);

		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
		/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
		bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

		if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
			if (bus->use_d0_inform) {
				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
			/* ring doorbell 1 (hostready) */
			dhd_bus_hostready(bus);

			DHD_GENERAL_LOCK(bus->dhd, flags);
			bus->dhd->busstate = DHD_BUS_DATA;
#ifdef DHD_PCIE_RUNTIMEPM
			if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
				/* Wake any waiter blocked on the runtime-PM queue. */
				wake_up_interruptible(&bus->rpm_queue);
#endif /* DHD_PCIE_RUNTIMEPM */
			/* resume all interface network queue. */
			dhd_bus_start_queue(bus);

			/* TODO: for NDIS also we need to use enable_irq in future */
			bus->resume_intr_enable_count++;

			/* For Linux, Macos etc (otherthan NDIS) enable back the dongle interrupts
			 * using intmask and host interrupts
			 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
			 */
			dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
			dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */

			DHD_GENERAL_UNLOCK(bus->dhd, flags);

			if (bus->dhd->dhd_watchdog_ms_backup) {
				DHD_ERROR(("%s: Enabling wdtick after resume\n",
				dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);

		bus->last_resume_end_time = OSL_LOCALTIME_NS();
/*
 * dhdpcie_force_alp() - force (enable == TRUE) or release (enable == FALSE)
 * the ALP clock request in the PCIe core clk_ctl_st register.
 * NOTE(review): the if (enable)/else framing around the two si_corereg()
 * calls was elided in this extraction: the first call sets CCS_FORCEALP,
 * the second clears it.
 */
dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
	ASSERT(bus && bus->sih);

	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
/*
 * Read-modify-write of the PCIe L1 entry time field through the core's
 * indirect configaddr/configdata register pair.  Only bits [22:16] are
 * replaced; l1_entry_time is masked to 7 bits.
 * NOTE(review): the literal config offset written into configaddr and the
 * final configdata write value were elided in this extraction.
 */
dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
	ASSERT(bus && bus->sih);

	/* Select the target config register through the indirect window. */
	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
	/* Replace bits [22:16] with the (7-bit) requested entry time. */
	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
/*
 * dhd_apply_d11_war_length() - work-around for d11 loopback on
 * BCM4375/BCM4377: adjusts the requested loopback length when the loopback
 * mode is not one of the M2M modes.  Returns the (possibly adjusted) length.
 * NOTE(review): the tail of the condition and the length-adjustment
 * statement were elided in this extraction.
 */
dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
	uint16 chipid = si_chipid(bus->sih);
	if ((chipid == BCM4375_CHIP_ID ||
		chipid == BCM4377_CHIP_ID) &&
		(d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK) &&
	DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
/** Transfers bytes from host to dongle and to host again using DMA */
/*
 * Validates bus/protocol state and the requested length (5..4194296 bytes),
 * applies the d11 length work-around, starts the loopback DMA through
 * dhdmsgbuf_dmaxfer_req() and, when 'wait' is non-zero, blocks until
 * bus->dmaxfer_complete is signalled by the completion path.
 * NOTE(review): early "return" statements and the branch framing around
 * the final BCME_NOTREADY assignment were elided in this extraction.
 */
dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
	uint32 len, uint32 srcdelay, uint32 destdelay,
	uint32 d11_lpbk, uint32 core_num, uint32 wait)
	if (bus->dhd == NULL) {
		DHD_ERROR(("bus not inited\n"));
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
	if (bus->dhd->busstate != DHD_BUS_DATA) {
		DHD_ERROR(("not in a readystate to LPBK  is not inited\n"));
	if (len < 5 || len > 4194296) {
		DHD_ERROR(("len is too small or too large\n"));

	len = dhd_apply_d11_war_length(bus, len, d11_lpbk);

	bus->dmaxfer_complete = FALSE;
	ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
		d11_lpbk, core_num);
	if (ret != BCME_OK || !wait)
	/* Block until the completion callback flips dmaxfer_complete. */
	ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
	ret = BCME_NOTREADY;
/*
 * dhdpcie_bus_download_state() - move the dongle in or out of firmware
 * download state.
 * enter == TRUE: halt the ARM (CA7/CR4) or disable it (older cores), reset
 * SOCRAM/SYSMEM and prepare the shared-area address so the host can write
 * the firmware image.
 * enter == FALSE: write the NVRAM vars, seed the heap-randomisation value,
 * plant the reset vector at address 0 (CA7/CR4), then release the ARM from
 * reset so it boots from RAM, and mark the bus DHD_BUS_LOAD.
 * Always returns with the PCIe core selected.
 * NOTE(review): early returns, some braces and the enter/exit branch
 * framing were elided in this extraction; surviving tokens are unchanged.
 */
dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
	volatile uint32 *cr4_regs;
		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));

	/* To enter download state, disable ARM and reset SOCRAM.
	 * To exit download state, simply reset ARM (default is RAM boot).
	 */
		/* Make sure BAR1 maps to backplane address 0 */
		dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
		bus->alp_only = TRUE;

		/* some chips (e.g. 43602) have two ARM cores, the CR4 core receives the firmware. */
		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);

		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
			!(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
			!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
			bcmerror = BCME_ERROR;

		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
			/* Halt ARM & remove reset */
			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
			si_core_reset(bus->sih, 0, 0);
			/* reset last 4 bytes of RAM address. to be used for shared area */
			dhdpcie_init_shared_addr(bus);
		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
			si_core_disable(bus->sih, 0);

			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
			si_core_reset(bus->sih, 0, 0);

			/* Clear the top bit of memory */
			if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
				(uint8 *)&zeros, 4) < 0) {
				bcmerror = BCME_ERROR;
			/*
			 * Read RAM base address [0x18_0000]
			 * [next] Download firmware
			 * [done at else] Populate the reset vector
			 * [done at else] Remove ARM halt
			 */
			/* Halt ARM & remove reset */
			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
			if (BCM43602_CHIP(bus->sih->chip)) {
				/* Clear the 43602 per-bank PDA so all SOCRAM banks stay powered. */
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
			/* reset last 4 bytes of RAM address. to be used for shared area */
			dhdpcie_init_shared_addr(bus);

		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
			/* write random numbers to sysmem for the purpose of
			 * randomizing heap address space.
			 */
			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
			/* switch back to arm core again */
			if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
			/* write address 0 with reset instruction */
			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
			/* now remove reset and halt and continue to run CA7 */
		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
			if (!si_iscoreup(bus->sih)) {
				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
				bcmerror = BCME_ERROR;

			/* Enable remap before ARM reset but after vars.
			 * No backplane access in remap mode
			 */
			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
				!si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
				bcmerror = BCME_ERROR;

			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
				!(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;

			if (BCM43602_CHIP(bus->sih->chip)) {
				/* Firmware crashes on SOCSRAM access when core is in reset */
				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
					bcmerror = BCME_ERROR;
				si_core_reset(bus->sih, 0, 0);
				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);

			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
			/* write a random number to TCM for the purpose of
			 * randomizing heap address space.
			 */
			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
			if ((bcmerror = dhdpcie_wrt_host_whitelist_region(bus)) != BCME_OK) {
				DHD_ERROR(("%s: Failed to write Whitelist region to TCM !\n",
			/* switch back to arm core again */
			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
			/* write address 0 with reset instruction */
			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
			if (bcmerror == BCME_OK) {
				/* Read back and verify the planted reset vector. */
				bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
					(uint8 *)&tmp, sizeof(tmp));

				if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
					DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
						__FUNCTION__, bus->resetinstr));
					DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
						__FUNCTION__, tmp));
					bcmerror = BCME_ERROR;
			/* now remove reset and halt and continue to run CR4 */

		si_core_reset(bus->sih, 0, 0);

		/* Allow HT Clock now that the ARM is running. */
		bus->alp_only = FALSE;

		bus->dhd->busstate = DHD_BUS_LOAD;

	/* Always return to PCIE core */
	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
} /* dhdpcie_bus_download_state */
/*
 * dhdpcie_bus_write_vars() - write the cached NVRAM variable blob
 * (bus->vars, bus->varsz) to the top of dongle RAM, optionally read it back
 * for verification (DHD_DEBUG), then write the "length token" (word count in
 * the low 16 bits, its one's-complement checksum in the high 16 bits) into
 * the last word of RAM where the firmware expects it.
 * NOTE(review): some declarations (varaddr, vbuffer, varsizew, bcmerror),
 * early returns and the #ifdef DHD_DEBUG opener were elided in this
 * extraction.
 */
dhdpcie_bus_write_vars(dhd_bus_t *bus)
	uint32 varsize, phys_size;
	uint8 *nvram_ularray;
#endif /* DHD_DEBUG */

	/* Even if there are no vars to be written, we still need to set the ramsize. */
	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
	/* Vars live just below the last word of RAM (the length token slot). */
	varaddr = (bus->ramsize - 4) - varsize;
	varaddr += bus->dongle_ram_base;

	vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
	bzero(vbuffer, varsize);
	bcopy(bus->vars, vbuffer, bus->varsz);
	/* Write the vars list */
	bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);

	/* Implement read back and verify later */
	/* Verify NVRAM bytes */
	DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
	nvram_ularray = (uint8 *)MALLOC(bus->dhd->osh, varsize);
	if (!nvram_ularray) {
		MFREE(bus->dhd->osh, vbuffer, varsize);

	/* Upload image to verify downloaded contents. */
	memset(nvram_ularray, 0xaa, varsize);

	/* Read the vars list to temp buffer for comparison */
	bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
		DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
			__FUNCTION__, bcmerror, varsize, varaddr));

	/* Compare the org NVRAM with the one read from RAM */
	if (memcmp(vbuffer, nvram_ularray, varsize)) {
		DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
		DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",

	MFREE(bus->dhd->osh, nvram_ularray, varsize);
#endif /* DHD_DEBUG */

	MFREE(bus->dhd->osh, vbuffer, varsize);

	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
	phys_size += bus->dongle_ram_base;

	/* adjust to the user specified RAM */
	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
		phys_size, bus->ramsize));
	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
	varsize = ((phys_size - 4) - varaddr);

	/*
	 * Determine the length token:
	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
	 */
	bus->nvram_csm = varsizew;
	varsizew = varsize / 4;
	varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
	bus->nvram_csm = varsizew;
	varsizew = htol32(varsizew);

	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));

	/* Write the length token to the last word */
	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
		(uint8 *)&varsizew, 4);
} /* dhdpcie_bus_write_vars */
/*
 * dhdpcie_downloadvars() - replace the cached NVRAM variable blob
 * (bus->vars / bus->varsz) with the caller-supplied buffer 'arg' of 'len'
 * bytes, then extract ccode/regrev bookkeeping from it for later use.
 * NOTE(review): the sanity-check conditions guarding BCME_NOTDOWN /
 * BCME_BUFTOOSHORT, several early returns, loop-body tails and some local
 * declarations (i, sp, ep, tmpbuf, tmpidx, pos) were elided in this
 * extraction.
 */
dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
	int bcmerror = BCME_OK;
#if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
#endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Basic sanity checks */
	bcmerror = BCME_NOTDOWN;
	bcmerror = BCME_BUFTOOSHORT;

	/* Free the old ones and replace with passed variables */
	MFREE(bus->dhd->osh, bus->vars, bus->varsz);

	bus->vars = MALLOC(bus->dhd->osh, len);
	bus->varsz = bus->vars ? len : 0;
	if (bus->vars == NULL) {
		bcmerror = BCME_NOMEM;

	/* Copy the passed variables, which should include the terminating double-null */
	bcopy(arg, bus->vars, bus->varsz);

#ifdef DHD_USE_SINGLE_NVRAM_FILE
	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
		char tag[2][8] = {"ccode=", "regrev="};

		/* Find ccode and regrev info */
		for (i = 0; i < 2; i++) {
			sp = strnstr(bus->vars, tag[i], bus->varsz);
				DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
					__FUNCTION__, bus->nv_path));
				bcmerror = BCME_ERROR;
			sp = strchr(sp, '=');
			ep = strchr(sp, '\0');
			/* We assumed that string length of both ccode and
			 * regrev values should not exceed WLC_CNTRY_BUF_SZ
			 */
			if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
				while (*sp != '\0') {
					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
						__FUNCTION__, tag[i], *sp));
				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
					__FUNCTION__, tag[i]));
				bcmerror = BCME_ERROR;
#endif /* DHD_USE_SINGLE_NVRAM_FILE */

#if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
#ifdef DHD_USE_SINGLE_NVRAM_FILE
	if (dhd_bus_get_fw_mode(bus->dhd) != DHD_FLAG_MFG_MODE)
#endif /* DHD_USE_SINGLE_NVRAM_FILE */
		/* Copy with NUL separators rewritten as newlines so strstr/sscanf
		 * can scan the whole blob as one string.
		 */
		tmpbuf = MALLOCZ(bus->dhd->osh, bus->varsz + 1);
		if (tmpbuf == NULL) {
		memcpy(tmpbuf, bus->vars, bus->varsz);
		for (tmpidx = 0; tmpidx < bus->varsz; tmpidx++) {
			if (tmpbuf[tmpidx] == 0) {
				tmpbuf[tmpidx] = '\n';
		bus->dhd->vars_ccode[0] = 0;
		bus->dhd->vars_regrev = 0;
		if ((pos = strstr(tmpbuf, "ccode"))) {
			sscanf(pos, "ccode=%3s\n", bus->dhd->vars_ccode);
		if ((pos = strstr(tmpbuf, "regrev"))) {
			sscanf(pos, "regrev=%u\n", &(bus->dhd->vars_regrev));
		MFREE(bus->dhd->osh, tmpbuf, bus->varsz + 1);
#endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */
/* loop through the capability list and see if the pcie capabilty exists */
/*
 * dhdpcie_find_pci_capability() - walk the PCI config-space capability
 * linked list (header type 0 only) looking for req_cap_id; on the visible
 * evidence the walk stops when the id matches or the next pointer is 0x00.
 * NOTE(review): local declarations (cap_id, cap_ptr, byte_val), the early
 * "return" on failed checks and the final "return cap_ptr;" were elided in
 * this extraction.
 */
dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
	/* check for Header type 0 */
	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));

	/* check if the capability pointer field exists */
	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));

	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
	/* check if the capability pointer is 0x00 */
	if (cap_ptr == 0x00) {
		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));

	/* loop thr'u the capability list and see if the pcie capabilty exists */
	cap_id = read_pci_cfg_byte(cap_ptr);

	while (cap_id != req_cap_id) {
		/* Next-capability pointer lives one byte past the capability id. */
		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
		if (cap_ptr == 0x00) break;
		cap_id = read_pci_cfg_byte(cap_ptr);
/*
 * dhdpcie_pme_active() - clear the PME status bit and enable (enable ==
 * TRUE) or disable PME generation in the device's Power Management
 * capability control/status register.
 * NOTE(review): local declarations, the early return when the capability is
 * absent, and the if (enable)/else framing were elided in this extraction.
 */
dhdpcie_pme_active(osl_t *osh, bool enable)
	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));

	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));

	/* PME_Status is write-one-to-clear, so writing it back clears it. */
	pme_csr |= PME_CSR_PME_STAT;
	pme_csr |= PME_CSR_PME_EN;
	pme_csr &= ~PME_CSR_PME_EN;

	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
/*
 * dhdpcie_pme_cap() - TRUE when the device's Power Management capability
 * advertises any PME-capable power states (PME_CAP_PM_STATES bits set).
 * NOTE(review): local declarations and the early return when the capability
 * is absent were elided in this extraction.
 */
dhdpcie_pme_cap(osl_t *osh)
	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));

	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));

	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));

	return ((pme_cap & PME_CAP_PM_STATES) != 0);
/*
 * dhdpcie_lcreg() - read-modify-write of the PCIe Link Control register in
 * config space; returns the register value after the (optional) update.
 * NOTE(review): local declarations, the early return when the PCIe
 * capability is absent, and the "if (mask)" framing around the write were
 * elided in this extraction.
 */
dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
	uint8 lcreg_offset;	/* PCIE capability LCreg offset in the config space */

	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));

	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;

	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	/* Set only the requested bits of the requested value. */
	reg_val |= (mask & val);
	OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);

	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
/*
 * dhdpcie_clkreq() - set or clear the CLKREQ# enable bit in the PCIe Link
 * Control register and read back the result.
 * NOTE(review): local declarations, the early return when the capability is
 * absent, the if (mask & val)/else framing and the final return statements
 * (1/0 from the CLKREQ read-back test) were elided in this extraction.
 */
dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
	uint8 lcreg_offset;	/* PCIE capability LCreg offset in the config space */

	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));

	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;

	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	reg_val |= PCIE_CLKREQ_ENAB;
	reg_val &= ~PCIE_CLKREQ_ENAB;
	OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
	/* Read back to report the state actually latched by the device. */
	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	if (reg_val & PCIE_CLKREQ_ENAB)
/*
 * dhd_dump_intr_counters() - append the interrupt enable/disable counters
 * and the most recent ISR/DPC/suspend/resume timestamps to 'strbuf'.
 * NOTE(review): the local "bus" declaration and the NULL-check early
 * returns were elided in this extraction.
 * NOTE(review): in the big timestamp bcm_bprintf(), the argument order
 * (isr_exit, dpc_entry, dpc_sched, last_non_ours) does not appear to match
 * the label order in the format string (isr_exit, dpc_sched,
 * last_non_ours, dpc_entry) — verify against the label/value pairing
 * intended upstream.
 */
void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
	uint64 current_time = OSL_LOCALTIME_NS();
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));

	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
		"dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
		bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
		bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
		bus->dpc_return_busdown_count, bus->non_ours_irq_count);
#ifdef BCMPCIE_OOB_HOST_WAKE
	bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
		" oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n",
		bus->oob_intr_count, bus->oob_intr_enable_count,
		bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(dhd->bus),
		bus->last_oob_irq_time);
#endif /* BCMPCIE_OOB_HOST_WAKE */
	bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
		" isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
		" last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
		"last_process_ctrlbuf_time="SEC_USEC_FMT" last_process_flowring_time="SEC_USEC_FMT
		" last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
		" last_process_infocpl_time="SEC_USEC_FMT
		"\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
		"last_d3_inform_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
		GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_entry_time),
		GET_SEC_USEC(bus->dpc_sched_time), GET_SEC_USEC(dhd->bus->last_non_ours_irq_time),
		GET_SEC_USEC(bus->last_process_ctrlbuf_time),
		GET_SEC_USEC(bus->last_process_flowring_time),
		GET_SEC_USEC(bus->last_process_txcpl_time),
		GET_SEC_USEC(bus->last_process_rxcpl_time),
		GET_SEC_USEC(bus->last_process_infocpl_time),
		GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
		GET_SEC_USEC(bus->last_d3_inform_time));

	bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
		SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
		SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
		GET_SEC_USEC(dhd->bus->last_suspend_end_time),
		GET_SEC_USEC(dhd->bus->last_resume_start_time),
		GET_SEC_USEC(dhd->bus->last_resume_end_time));
/*
 * dhd_dump_intr_registers() - snapshot the PCIe mailbox interrupt
 * status/mask registers, D2H doorbell 0 and the shared-memory D2H mailbox
 * data word into 'strbuf'.
 * NOTE(review): the declarations of intmask and d2h_db0 were elided in this
 * extraction.
 */
void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
	uint32 intstatus = 0;
	uint32 d2h_mb_data = 0;

	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_int, 0, 0);
	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_mask, 0, 0);
	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);

	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
		intstatus, intmask, d2h_db0);
	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
		d2h_mb_data, dhd->bus->def_intmask);
6859 /** Add bus dump output to a buffer */
/*
 * dhd_bus_dump() - append a full PCIe bus state dump to `strbuf`:
 * wake statistics, interrupt registers/counters, per-flowring queue state,
 * optional TX completion latency stats, and D3/D0 inform counters.
 * Bails out early when the bus is not in DHD_BUS_DATA state.
 *
 * NOTE(review): extraction dropped several original lines in this function
 * (local declarations - presumably `uint16 flowid; int ix; int ifindex;` -
 * plus some bcm_bprintf() call heads, `else` lines and closing braces).
 * Confirm against the full file before editing.
 */
6860 void dhd_bus_dump(dhd_pub_t
*dhdp
, struct bcmstrbuf
*strbuf
)
6864 flow_ring_node_t
*flow_ring_node
;
6865 flow_info_t
*flow_info
;
6866 #ifdef TX_STATUS_LATENCY_STATS
6868 if_flow_lkup_t
*if_flow_lkup
;
6869 dhd_if_tx_status_latency_t if_tx_status_latency
[DHD_MAX_IFS
];
6870 #endif /* TX_STATUS_LATENCY_STATS */
/* Only dump while the bus is up and passing data. */
6872 if (dhdp
->busstate
!= DHD_BUS_DATA
)
6875 #ifdef TX_STATUS_LATENCY_STATS
6876 memset(if_tx_status_latency
, 0, sizeof(if_tx_status_latency
));
6877 #endif /* TX_STATUS_LATENCY_STATS */
/* Host-wake statistics (what kind of RX traffic woke the host). */
6878 #ifdef DHD_WAKE_STATUS
6879 bcm_bprintf(strbuf
, "wake %u rxwake %u readctrlwake %u\n",
6880 bcmpcie_get_total_wake(dhdp
->bus
), dhdp
->bus
->wake_counts
.rxwake
,
6881 dhdp
->bus
->wake_counts
.rcwake
);
6882 #ifdef DHD_WAKE_RX_STATUS
6883 bcm_bprintf(strbuf
, " unicast %u muticast %u broadcast %u arp %u\n",
6884 dhdp
->bus
->wake_counts
.rx_ucast
, dhdp
->bus
->wake_counts
.rx_mcast
,
6885 dhdp
->bus
->wake_counts
.rx_bcast
, dhdp
->bus
->wake_counts
.rx_arp
);
6886 bcm_bprintf(strbuf
, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
6887 dhdp
->bus
->wake_counts
.rx_multi_ipv4
, dhdp
->bus
->wake_counts
.rx_multi_ipv6
,
6888 dhdp
->bus
->wake_counts
.rx_icmpv6
, dhdp
->bus
->wake_counts
.rx_multi_other
);
6889 bcm_bprintf(strbuf
, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
6890 dhdp
->bus
->wake_counts
.rx_icmpv6_ra
, dhdp
->bus
->wake_counts
.rx_icmpv6_na
,
6891 dhdp
->bus
->wake_counts
.rx_icmpv6_ns
);
6892 #endif /* DHD_WAKE_RX_STATUS */
/* Per-event wake counters, printed only for events that fired. */
6893 #ifdef DHD_WAKE_EVENT_STATUS
6894 for (flowid
= 0; flowid
< WLC_E_LAST
; flowid
++)
6895 if (dhdp
->bus
->wake_counts
.rc_event
[flowid
] != 0)
6896 bcm_bprintf(strbuf
, " %s = %u\n", bcmevent_get_name(flowid
),
6897 dhdp
->bus
->wake_counts
.rc_event
[flowid
]);
6898 bcm_bprintf(strbuf
, "\n");
6899 #endif /* DHD_WAKE_EVENT_STATUS */
6900 #endif /* DHD_WAKE_STATUS */
/* Protocol layer, interrupt register and interrupt counter dumps. */
6902 dhd_prot_print_info(dhdp
, strbuf
);
6903 dhd_dump_intr_registers(dhdp
, strbuf
);
6904 dhd_dump_intr_counters(dhdp
, strbuf
);
6905 bcm_bprintf(strbuf
, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
6906 dhdp
->bus
->h2d_mb_data_ptr_addr
, dhdp
->bus
->d2h_mb_data_ptr_addr
);
6907 bcm_bprintf(strbuf
, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp
->cumm_ctr
));
/* Flow ring table header (one row per active flow ring below). */
6909 "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
6910 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
6911 "Overflows", "RD", "WR");
6913 #ifdef TX_STATUS_LATENCY_STATS
6914 /* Average Tx status/Completion Latency in micro secs */
6915 bcm_bprintf(strbuf
, "%12s", "AvgTxCmpL_Us ");
6916 #endif /* TX_STATUS_LATENCY_STATS */
6918 bcm_bprintf(strbuf
, "%5s %6s %5s \n", "Acked", "tossed", "noack");
/* Walk every flow ring; inactive rings are skipped. */
6920 for (flowid
= 0; flowid
< dhdp
->num_flow_rings
; flowid
++) {
6921 flow_ring_node
= DHD_FLOW_RING(dhdp
, flowid
);
6922 if (!flow_ring_node
->active
)
6925 flow_info
= &flow_ring_node
->flow_info
;
6927 "%3d. %4d %2d %4d "MACDBG
" %4d %4d %6d %10u ", ix
++,
6928 flow_ring_node
->flowid
, flow_info
->ifindex
, flow_info
->tid
,
6929 MAC2STRDBG(flow_info
->da
),
6930 DHD_FLOW_QUEUE_LEN(&flow_ring_node
->queue
),
6931 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node
->queue
)),
6932 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node
->queue
)),
6933 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node
->queue
));
6934 dhd_prot_print_flow_ring(dhdp
, flow_ring_node
->prot_info
, strbuf
,
6937 #ifdef TX_STATUS_LATENCY_STATS
/* Per-ring average completion latency; guard against divide-by-zero. */
6938 bcm_bprintf(strbuf
, "%12d ",
6939 flow_info
->num_tx_status
?
6940 DIV_U64_BY_U64(flow_info
->cum_tx_status_latency
,
6941 flow_info
->num_tx_status
) : 0);
/* Accumulate per-interface latency stats; ifindex is bounds-checked. */
6943 ifindex
= flow_info
->ifindex
;
6944 ASSERT(ifindex
< DHD_MAX_IFS
);
6945 if (ifindex
< DHD_MAX_IFS
) {
6946 if_tx_status_latency
[ifindex
].num_tx_status
+= flow_info
->num_tx_status
;
6947 if_tx_status_latency
[ifindex
].cum_tx_status_latency
+=
6948 flow_info
->cum_tx_status_latency
;
6950 DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
6951 __FUNCTION__
, ifindex
, flowid
));
6953 #endif /* TX_STATUS_LATENCY_STATS */
6955 "%5s %6s %5s\n", "NA", "NA", "NA");
/* Per-interface latency summary table. */
6958 #ifdef TX_STATUS_LATENCY_STATS
6959 bcm_bprintf(strbuf
, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStats");
6960 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
6961 for (ix
= 0; ix
< DHD_MAX_IFS
; ix
++) {
6962 if (!if_flow_lkup
[ix
].status
) {
6965 bcm_bprintf(strbuf
, "%2d %16d %16d\n",
6967 if_tx_status_latency
[ix
].num_tx_status
?
6968 DIV_U64_BY_U64(if_tx_status_latency
[ix
].cum_tx_status_latency
,
6969 if_tx_status_latency
[ix
].num_tx_status
): 0,
6970 if_tx_status_latency
[ix
].num_tx_status
);
6972 #endif /* TX_STATUS_LATENCY_STATS */
/* Power-management related counters. */
6973 bcm_bprintf(strbuf
, "D3 inform cnt %d\n", dhdp
->bus
->d3_inform_cnt
);
6974 bcm_bprintf(strbuf
, "D0 inform cnt %d\n", dhdp
->bus
->d0_inform_cnt
);
6975 bcm_bprintf(strbuf
, "D0 inform in use cnt %d\n", dhdp
->bus
->d0_inform_in_use_cnt
);
6976 if (dhdp
->d2h_hostrdy_supported
) {
6977 bcm_bprintf(strbuf
, "hostready count:%d\n", dhdp
->bus
->hostready_count
);
6979 bcm_bprintf(strbuf
, "d2h_intr_method -> %s\n",
6980 dhdp
->bus
->d2h_intr_method
? "PCIE_MSI" : "PCIE_INTX");
6984 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
6985 * flow queue to their flow ring.
/*
 * Walks the active flow ring list under flowring_list_lock and calls
 * dhd_prot_update_txflowring() for each node.  Aborts early if the device
 * was removed or a hang was already reported.
 *
 * NOTE(review): extraction dropped the return-type line, the declaration of
 * the list cursors (presumably `dll_t *item, *next;`), the loop-step line
 * (presumably `item = next`) and closing braces - confirm against full file.
 */
6988 dhd_update_txflowrings(dhd_pub_t
*dhd
)
6990 unsigned long flags
;
6992 flow_ring_node_t
*flow_ring_node
;
6993 struct dhd_bus
*bus
= dhd
->bus
;
6995 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
6996 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
6997 for (item
= dll_head_p(&bus
->flowring_active_list
);
6998 (!dhd_is_device_removed(dhd
) && !dll_end(&bus
->flowring_active_list
, item
));
/* Don't touch the dongle once a hang has been sent to upper layers. */
7000 if (dhd
->hang_was_sent
) {
/* Fetch next before use; current node is consumed below. */
7004 next
= dll_next_p(item
);
7005 flow_ring_node
= dhd_constlist_to_flowring(item
);
7007 /* Ensure that flow_ring_node in the list is Not Null */
7008 ASSERT(flow_ring_node
!= NULL
);
7010 /* Ensure that the flowring node has valid contents */
7011 ASSERT(flow_ring_node
->prot_info
!= NULL
);
7013 dhd_prot_update_txflowring(dhd
, flow_ring_node
->flowid
, flow_ring_node
->prot_info
);
7015 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
7018 /** Mailbox ringbell Function */
/*
 * Generate a host-to-dongle mailbox interrupt.  On buscorerev 2/4/6 mailbox
 * communication is unsupported and only an error is logged.  Otherwise the
 * interrupt is raised either through doorbell 1 (PCIe core register) when
 * bus->db1_for_mb is set, or through the PCISBMbx config-space register.
 *
 * NOTE(review): extraction dropped the return-type line, the value arguments
 * of the si_corereg() doorbell write, the `} else {` line and closing braces.
 */
7020 dhd_bus_gen_devmb_intr(struct dhd_bus
*bus
)
7022 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
7023 (bus
->sih
->buscorerev
== 4)) {
7024 DHD_ERROR(("mailbox communication not supported\n"));
7027 if (bus
->db1_for_mb
) {
7028 /* this is a pcie core register, not the config register */
7029 DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
7030 if (DAR_PWRREQ(bus
)) {
7031 dhd_bus_pcie_pwr_req(bus
);
7033 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db1_addr_get(bus
),
7036 DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
/* Config write appears twice; presumably a deliberate double-write
 * (hardware workaround / posted-write flush) - confirm before removing.
 */
7037 dhdpcie_bus_cfg_write_dword(bus
, PCISBMbx
, 4, (1 << 0));
7038 dhdpcie_bus_cfg_write_dword(bus
, PCISBMbx
, 4, (1 << 0));
7042 /* Upon receiving a mailbox interrupt,
7043 * if H2D_FW_TRAP bit is set in mailbox location
7047 dhdpcie_fw_trap(dhd_bus_t
*bus
)
7049 /* Send the mailbox data and generate mailbox intr. */
7050 dhdpcie_send_mb_data(bus
, H2D_FW_TRAP
);
7051 /* For FWs that cannot interprete H2D_FW_TRAP */
7052 (void)dhd_wl_ioctl_set_intiovar(bus
->dhd
, "bus:disconnect", 99, WLC_SET_VAR
, TRUE
, 0);
7055 /** mailbox doorbell ring function */
/*
 * Ring the H2D doorbell through si_corereg().  Refuses to touch the device
 * after D3_INFORM was sent (bus in low-power state) or when the PCIe link
 * is down.  Old cores (rev 2/4/6) use PCIE_INTB in pcie_mailbox_int;
 * newer cores write doorbell 0 (doorbell 2 when IDMA is active).
 *
 * NOTE(review): extraction dropped the return-type line, the `return;`
 * statements in the two guard clauses, the doorbell write-value arguments of
 * the IDMA si_corereg() call, `} else {` lines and closing braces.
 */
7057 dhd_bus_ringbell(struct dhd_bus
*bus
, uint32 value
)
7059 /* Skip after sending D3_INFORM */
7060 if (bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
) {
7061 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7062 __FUNCTION__
, bus
->bus_low_power_state
));
7066 /* Skip in the case of link down */
7067 if (bus
->is_linkdown
) {
7068 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
7072 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
7073 (bus
->sih
->buscorerev
== 4)) {
7074 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
,
7075 PCIE_INTB
, PCIE_INTB
);
7077 /* this is a pcie core register, not the config regsiter */
7078 DHD_INFO(("writing a door bell to the device\n"));
7079 if (IDMA_ACTIVE(bus
->dhd
)) {
7080 if (DAR_PWRREQ(bus
)) {
7081 dhd_bus_pcie_pwr_req(bus
);
7083 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db0_addr_2_get(bus
),
7086 if (DAR_PWRREQ(bus
)) {
7087 dhd_bus_pcie_pwr_req(bus
);
7089 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
7090 dhd_bus_db0_addr_get(bus
), ~0, 0x12345678);
7095 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
/*
 * Ring doorbell 0 address 2 (DMA channel 2) via si_corereg(), with the same
 * low-power-state and link-down guards as dhd_bus_ringbell().  `devwake` is
 * not referenced in the visible body.
 *
 * NOTE(review): extraction dropped the return-type line, the guard-clause
 * `return;` statements and the final si_corereg() value arguments.
 */
7097 dhd_bus_ringbell_2(struct dhd_bus
*bus
, uint32 value
, bool devwake
)
7099 /* this is a pcie core register, not the config regsiter */
7100 /* Skip after sending D3_INFORM */
7101 if (bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
) {
7102 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7103 __FUNCTION__
, bus
->bus_low_power_state
));
7107 /* Skip in the case of link down */
7108 if (bus
->is_linkdown
) {
7109 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
7113 DHD_INFO(("writing a door bell 2 to the device\n"));
7114 if (DAR_PWRREQ(bus
)) {
7115 dhd_bus_pcie_pwr_req(bus
);
7117 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db0_addr_2_get(bus
),
7122 dhdpcie_bus_ringbell_fast(struct dhd_bus
*bus
, uint32 value
)
7124 /* Skip after sending D3_INFORM */
7125 if (bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
) {
7126 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7127 __FUNCTION__
, bus
->bus_low_power_state
));
7131 /* Skip in the case of link down */
7132 if (bus
->is_linkdown
) {
7133 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
7137 if (DAR_PWRREQ(bus
)) {
7138 dhd_bus_pcie_pwr_req(bus
);
7140 W_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_addr
, value
);
7144 dhdpcie_bus_ringbell_2_fast(struct dhd_bus
*bus
, uint32 value
, bool devwake
)
7146 /* Skip after sending D3_INFORM */
7147 if (bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
) {
7148 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7149 __FUNCTION__
, bus
->bus_low_power_state
));
7153 /* Skip in the case of link down */
7154 if (bus
->is_linkdown
) {
7155 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
7159 if (DAR_PWRREQ(bus
)) {
7160 dhd_bus_pcie_pwr_req(bus
);
7162 W_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_2_addr
, value
);
7166 dhd_bus_ringbell_oldpcie(struct dhd_bus
*bus
, uint32 value
)
7169 /* Skip after sending D3_INFORM */
7170 if (bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
) {
7171 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
7172 __FUNCTION__
, bus
->bus_low_power_state
));
7176 /* Skip in the case of link down */
7177 if (bus
->is_linkdown
) {
7178 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
7182 w
= (R_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_addr
) & ~PCIE_INTB
) | PCIE_INTB
;
7183 W_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_addr
, w
);
7187 dhd_bus_get_mbintr_fn(struct dhd_bus
*bus
)
7189 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
7190 (bus
->sih
->buscorerev
== 4)) {
7191 bus
->pcie_mb_intr_addr
= si_corereg_addr(bus
->sih
, bus
->sih
->buscoreidx
,
7192 bus
->pcie_mailbox_int
);
7193 if (bus
->pcie_mb_intr_addr
) {
7194 bus
->pcie_mb_intr_osh
= si_osh(bus
->sih
);
7195 return dhd_bus_ringbell_oldpcie
;
7198 bus
->pcie_mb_intr_addr
= si_corereg_addr(bus
->sih
, bus
->sih
->buscoreidx
,
7199 dhd_bus_db0_addr_get(bus
));
7200 if (bus
->pcie_mb_intr_addr
) {
7201 bus
->pcie_mb_intr_osh
= si_osh(bus
->sih
);
7202 return dhdpcie_bus_ringbell_fast
;
7205 return dhd_bus_ringbell
;
7209 dhd_bus_get_mbintr_2_fn(struct dhd_bus
*bus
)
7211 bus
->pcie_mb_intr_2_addr
= si_corereg_addr(bus
->sih
, bus
->sih
->buscoreidx
,
7212 dhd_bus_db0_addr_2_get(bus
));
7213 if (bus
->pcie_mb_intr_2_addr
) {
7214 bus
->pcie_mb_intr_osh
= si_osh(bus
->sih
);
7215 return dhdpcie_bus_ringbell_2_fast
;
7217 return dhd_bus_ringbell_2
;
/*
 * Deferred procedure call (bottom half) for the PCIe bus interrupt.
 * Processes the mailbox interrupt via dhdpcie_bus_process_mailbox_intr();
 * when no reschedule is requested, re-enables the host IRQ that the ISR
 * disabled.  Maintains IN_DPC busy state and the dpc timing counters.
 *
 * NOTE(review): extraction dropped some original lines here (the opening
 * brace, the `if (!resched) { ... } else { ... }` skeleton around the
 * enable-irq/resched-time statements, the DHD_PCIE_RUNTIMEPM body, and the
 * intstatus reset) - confirm against the full file.
 */
7221 dhd_bus_dpc(struct dhd_bus
*bus
)
7223 bool resched
= FALSE
; /* Flag indicating resched wanted */
7224 unsigned long flags
;
7226 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
7228 bus
->dpc_entry_time
= OSL_LOCALTIME_NS();
7230 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7231 /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
7232 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
7233 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
7234 * and if we return from here, then IOCTL response will never be handled
7236 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
7237 DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__
));
7239 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7240 bus
->dpc_return_busdown_count
++;
7243 #ifdef DHD_PCIE_RUNTIMEPM
7245 #endif /* DHD_PCIE_RUNTIMEPM */
7246 DHD_BUS_BUSY_SET_IN_DPC(bus
->dhd
);
7247 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/* Main work: drain mailbox/rings; returns whether a resched is wanted. */
7249 resched
= dhdpcie_bus_process_mailbox_intr(bus
, bus
->intstatus
);
7252 bus
->dpc_intr_enable_count
++;
7253 /* For Linux, Macos etc (otherthan NDIS) enable back the host interrupts
7254 * which has been disabled in the dhdpcie_bus_isr()
7256 dhdpcie_enable_irq(bus
); /* Enable back interrupt!! */
7257 bus
->dpc_exit_time
= OSL_LOCALTIME_NS();
7259 bus
->resched_dpc_time
= OSL_LOCALTIME_NS();
7262 bus
->dpc_sched
= resched
;
7264 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7265 DHD_BUS_BUSY_CLEAR_IN_DPC(bus
->dhd
);
7266 dhd_os_busbusy_wake(bus
->dhd
);
7267 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/*
 * Send a host-to-dongle mailbox word.
 *
 * For firmware API rev >= 6 with mailbox disabled, the word is sent as an
 * H2D control message (dhd_prot_h2d_mbdata_send_ctrlmsg).  Otherwise the
 * legacy path is used: wait (bounded, ~100 polls) for any pending mailbox
 * transaction to be acked, write the word to the shared H2D_MB_DATA area,
 * and raise the device mailbox interrupt.  Bookkeeping counters/timestamps
 * are updated for D3_INFORM / D0_INFORM / D0_INFORM_IN_USE.
 *
 * NOTE(review): extraction dropped several lines (return type/statements,
 * declaration of the poll counter `i`, the per-iteration delay inside the
 * wait loop, and closing braces) - confirm against the full file.
 */
7274 dhdpcie_send_mb_data(dhd_bus_t
*bus
, uint32 h2d_mb_data
)
7276 uint32 cur_h2d_mb_data
= 0;
7278 DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__
, h2d_mb_data
));
7280 if (bus
->is_linkdown
) {
7281 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
/* New-style path: mailbox data carried in an H2D control message. */
7285 if (bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
&& !bus
->use_mailbox
) {
7286 DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
7288 /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
7290 if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus
->dhd
, h2d_mb_data
)) {
7291 DHD_ERROR(("failure sending the H2D Mailbox message "
/* Legacy path: poll until the previous mailbox word has been consumed. */
7299 dhd_bus_cmn_readshared(bus
, &cur_h2d_mb_data
, H2D_MB_DATA
, 0);
7301 if (cur_h2d_mb_data
!= 0) {
7303 DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data
));
7304 while ((i
++ < 100) && cur_h2d_mb_data
) {
7306 dhd_bus_cmn_readshared(bus
, &cur_h2d_mb_data
, H2D_MB_DATA
, 0);
7309 DHD_ERROR(("%s : waited 1ms for the dngl "
7310 "to ack the previous mb transaction\n", __FUNCTION__
));
7311 DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
7312 __FUNCTION__
, cur_h2d_mb_data
));
/* Publish the new word and interrupt the dongle. */
7316 dhd_bus_cmn_writeshared(bus
, &h2d_mb_data
, sizeof(uint32
), H2D_MB_DATA
, 0);
7317 dhd_bus_gen_devmb_intr(bus
);
/* Statistics / timestamps for the power-management inform messages. */
7320 if (h2d_mb_data
== H2D_HOST_D3_INFORM
) {
7321 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__
));
7322 bus
->last_d3_inform_time
= OSL_LOCALTIME_NS();
7323 bus
->d3_inform_cnt
++;
7325 if (h2d_mb_data
== H2D_HOST_D0_INFORM_IN_USE
) {
7326 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__
));
7327 bus
->d0_inform_in_use_cnt
++;
7329 if (h2d_mb_data
== H2D_HOST_D0_INFORM
) {
7330 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__
));
7331 bus
->d0_inform_cnt
++;
7339 dhd_bus_handle_d3_ack(dhd_bus_t
*bus
)
7341 unsigned long flags_bus
;
7342 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
7343 bus
->suspend_intr_disable_count
++;
7344 /* Disable dongle Interrupts Immediately after D3 */
7346 /* For Linux, Macos etc (otherthan NDIS) along with disabling
7347 * dongle interrupt by clearing the IntMask, disable directly
7348 * interrupt from the host side as well. Also clear the intstatus
7349 * if it is set to avoid unnecessary intrrupts after D3 ACK.
7351 dhdpcie_bus_intr_disable(bus
); /* Disable interrupt using IntMask!! */
7352 dhdpcie_bus_clear_intstatus(bus
);
7353 dhdpcie_disable_irq_nosync(bus
); /* Disable host interrupt!! */
7355 /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
7356 bus
->bus_low_power_state
= DHD_BUS_D3_ACK_RECIEVED
;
7357 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
7358 bus
->wait_for_d3_ack
= 1;
7359 dhd_os_d3ack_wake(bus
->dhd
);
/*
 * Dispatch a dongle-to-host mailbox word (D2H_MB_DATA bit flags):
 * FW halt/trap, deep-sleep enter/exit requests, host-sleep-exit ack and
 * D3 ack.  Brackets the work with a PCIe power request/clear when
 * multi-backplane power control (MULTIBP_ENAB) is in effect.
 *
 * NOTE(review): extraction dropped several lines (opening brace, the
 * `goto exit`/`return` paths, `else` branches of the DS_ENTER_REQ and
 * HANG_SEND_UP_TEST conditionals, closing braces) - confirm against
 * the full file.
 */
7362 dhd_bus_handle_mb_data(dhd_bus_t
*bus
, uint32 d2h_mb_data
)
7364 if (MULTIBP_ENAB(bus
->sih
)) {
7365 dhd_bus_pcie_pwr_req(bus
);
7368 DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data
));
/* Firmware halted/trapped: collect trap info and report a hang. */
7370 if (d2h_mb_data
& D2H_DEV_FWHALT
) {
7371 DHD_ERROR(("FW trap has happened\n"));
7372 dhdpcie_checkdied(bus
, NULL
, 0);
7373 #ifdef SUPPORT_LINKDOWN_RECOVERY
7374 #ifdef CONFIG_ARCH_MSM
7375 bus
->no_cfg_restore
= 1;
7376 #endif /* CONFIG_ARCH_MSM */
7377 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7378 dhd_os_check_hang(bus
->dhd
, 0, -EREMOTEIO
);
/* Dongle requests deep-sleep entry; ack it unless D3-ACK already seen. */
7381 if (d2h_mb_data
& D2H_DEV_DS_ENTER_REQ
) {
7382 bool ds_acked
= FALSE
;
7383 BCM_REFERENCE(ds_acked
);
7384 if (bus
->bus_low_power_state
== DHD_BUS_D3_ACK_RECIEVED
) {
7385 DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
7386 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
7389 /* what should we do */
7390 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
7392 dhdpcie_send_mb_data(bus
, H2D_HOST_DS_ACK
);
7393 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
7396 if (d2h_mb_data
& D2H_DEV_DS_EXIT_NOTE
) {
7397 /* what should we do */
7398 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
7400 if (d2h_mb_data
& D2HMB_DS_HOST_SLEEP_EXIT_ACK
) {
7401 /* what should we do */
7402 DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
/* D3 ACK: processed once; HANG_SEND_UP_TEST can force-skip it. */
7404 if (d2h_mb_data
& D2H_DEV_D3_ACK
) {
7405 /* what should we do */
7406 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
7407 if (!bus
->wait_for_d3_ack
) {
7408 #if defined(DHD_HANG_SEND_UP_TEST)
7409 if (bus
->dhd
->req_hang_type
== HANG_REASON_D3_ACK_TIMEOUT
) {
7410 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
7412 dhd_bus_handle_d3_ack(bus
);
7414 #else /* DHD_HANG_SEND_UP_TEST */
7415 dhd_bus_handle_d3_ack(bus
);
7416 #endif /* DHD_HANG_SEND_UP_TEST */
7421 if (MULTIBP_ENAB(bus
->sih
)) {
7422 dhd_bus_pcie_pwr_req_clear(bus
);
/*
 * Legacy (pre rev-6) mailbox handler: read the D2H_MB_DATA word from the
 * shared area, validate it, clear it back to zero (ack), then process the
 * same bit flags as dhd_bus_handle_mb_data() inline: FW halt, deep-sleep
 * enter/exit, D3 ack.  Brackets the work with a PCIe power request/clear
 * under MULTIBP_ENAB.
 *
 * NOTE(review): extraction dropped some lines (opening brace, declaration
 * of `zero`, early-return on invalid data, `else` branches, closing
 * braces) - confirm against the full file.
 */
7427 dhdpcie_handle_mb_data(dhd_bus_t
*bus
)
7429 uint32 d2h_mb_data
= 0;
7432 if (MULTIBP_ENAB(bus
->sih
)) {
7433 dhd_bus_pcie_pwr_req(bus
);
7436 dhd_bus_cmn_readshared(bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
7437 if (D2H_DEV_MB_INVALIDATED(d2h_mb_data
)) {
7438 DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
7439 __FUNCTION__
, d2h_mb_data
));
/* Ack by clearing the shared mailbox word. */
7443 dhd_bus_cmn_writeshared(bus
, &zero
, sizeof(uint32
), D2H_MB_DATA
, 0);
7445 DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data
));
7446 if (d2h_mb_data
& D2H_DEV_FWHALT
) {
7447 DHD_ERROR(("FW trap has happened\n"));
7448 dhdpcie_checkdied(bus
, NULL
, 0);
7449 /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
7452 if (d2h_mb_data
& D2H_DEV_DS_ENTER_REQ
) {
7453 /* what should we do */
7454 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
7455 dhdpcie_send_mb_data(bus
, H2D_HOST_DS_ACK
);
7456 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
7458 if (d2h_mb_data
& D2H_DEV_DS_EXIT_NOTE
) {
7459 /* what should we do */
7460 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
7462 if (d2h_mb_data
& D2H_DEV_D3_ACK
) {
7463 /* what should we do */
7464 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
7465 if (!bus
->wait_for_d3_ack
) {
7466 #if defined(DHD_HANG_SEND_UP_TEST)
7467 if (bus
->dhd
->req_hang_type
== HANG_REASON_D3_ACK_TIMEOUT
) {
7468 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
7470 dhd_bus_handle_d3_ack(bus
);
7472 #else /* DHD_HANG_SEND_UP_TEST */
7473 dhd_bus_handle_d3_ack(bus
);
7474 #endif /* DHD_HANG_SEND_UP_TEST */
7479 if (MULTIBP_ENAB(bus
->sih
)) {
7480 dhd_bus_pcie_pwr_req_clear(bus
);
/*
 * Rev-6+ mailbox handler used via bus->api.handle_mb_data: read the
 * D2H_MB_DATA word from the shared area, clear it back to zero (ack), and
 * delegate the bit-flag processing to dhd_bus_handle_mb_data().  Skips
 * everything when the link is down; brackets the shared-memory access with
 * a PCIe power request/clear under MULTIBP_ENAB.
 *
 * NOTE(review): extraction dropped some lines (opening brace, declaration
 * of `zero`, the early-exit path when the read word is empty, closing
 * braces) - confirm against the full file.
 */
7485 dhdpcie_read_handle_mb_data(dhd_bus_t
*bus
)
7487 uint32 d2h_mb_data
= 0;
7490 if (bus
->is_linkdown
) {
7491 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
7495 if (MULTIBP_ENAB(bus
->sih
)) {
7496 dhd_bus_pcie_pwr_req(bus
);
7499 dhd_bus_cmn_readshared(bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
/* Ack by clearing the shared mailbox word, then dispatch the flags. */
7504 dhd_bus_cmn_writeshared(bus
, &zero
, sizeof(uint32
), D2H_MB_DATA
, 0);
7506 dhd_bus_handle_mb_data(bus
, d2h_mb_data
);
7509 if (MULTIBP_ENAB(bus
->sih
)) {
7510 dhd_bus_pcie_pwr_req_clear(bus
);
/*
 * Process a mailbox interrupt from DPC context.  Returns TRUE when the DPC
 * should be rescheduled (more ring work pending).
 *
 * Old cores (rev 2/4/6) use I_BIT0/I_BIT1 of intstatus; newer cores
 * dispatch PCIE_MB_TOPCIE_FN0_* bits through bus->api.handle_mb_data and
 * then drain the message rings via dhdpci_bus_read_frames() - unless the
 * bus is suspended/in a low-power state, in which case ring processing is
 * skipped.  Brackets the work with a PCIe power request/clear under
 * MULTIBP_ENAB.
 *
 * NOTE(review): extraction dropped some lines (opening brace, `} else {`
 * lines, the skip/`goto exit` path after the power-save log, the
 * DHD_PCIE_NATIVE_RUNTIMEPM `#else` line, closing braces, and the final
 * `return resched;`) - confirm against the full file.
 */
7515 dhdpcie_bus_process_mailbox_intr(dhd_bus_t
*bus
, uint32 intstatus
)
7517 bool resched
= FALSE
;
7519 if (MULTIBP_ENAB(bus
->sih
)) {
7520 dhd_bus_pcie_pwr_req(bus
);
7522 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
7523 (bus
->sih
->buscorerev
== 4)) {
7524 /* Msg stream interrupt */
7525 if (intstatus
& I_BIT1
) {
7526 resched
= dhdpci_bus_read_frames(bus
);
7527 } else if (intstatus
& I_BIT0
) {
7528 /* do nothing for Now */
/* Newer cores: FN0 mailbox bits are dispatched to the API handler. */
7531 if (intstatus
& (PCIE_MB_TOPCIE_FN0_0
| PCIE_MB_TOPCIE_FN0_1
))
7532 bus
->api
.handle_mb_data(bus
);
/* Don't drain rings while suspended or after D3_INFORM (mailbox mode). */
7534 if ((bus
->dhd
->busstate
== DHD_BUS_SUSPEND
) || (bus
->use_mailbox
&&
7535 (bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
))) {
7536 DHD_ERROR(("%s: Bus is in power save state. "
7537 "Skip processing rest of ring buffers.\n", __FUNCTION__
));
7541 /* Validate intstatus only for INTX case */
7542 if ((bus
->d2h_intr_method
== PCIE_MSI
) ||
7543 ((bus
->d2h_intr_method
== PCIE_INTX
) && (intstatus
& bus
->d2h_mb_mask
))) {
7544 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
/* Hold a runtime-PM reference around the ring drain. */
7545 if (pm_runtime_get(dhd_bus_to_dev(bus
)) >= 0) {
7546 resched
= dhdpci_bus_read_frames(bus
);
7547 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
7548 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus
));
7551 resched
= dhdpci_bus_read_frames(bus
);
7552 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7557 if (MULTIBP_ENAB(bus
->sih
)) {
7558 dhd_bus_pcie_pwr_req_clear(bus
);
/*
 * Drain all D2H message buffers.  Order: trap buffer (rev-6+ FW), control
 * completions, then - unless the bus dropped into a low-power state - TX
 * flow ring updates, TX/RX/info completion rings (each bounded by
 * dhd_txbound / dhd_rxbound / DHD_INFORING_BOUND).  Returns whether more
 * work remains (DPC should be rescheduled).  On SUPPORT_LINKDOWN_RECOVERY,
 * a failed shared-memory read triggers link-down handling: memdump (when
 * enabled) or marking the link down, plus a hang message.
 *
 * NOTE(review): extraction dropped some lines (opening brace, declaration
 * of the accumulator - presumably `bool more = FALSE;` - several `return`/
 * `goto` lines, `else` lines and closing braces, and the final
 * `return more;`) - confirm against the full file.
 */
7564 dhdpci_bus_read_frames(dhd_bus_t
*bus
)
7568 /* First check if there a FW trap */
7569 if ((bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
) &&
7570 (bus
->dhd
->dongle_trap_data
= dhd_prot_process_trapbuf(bus
->dhd
))) {
7571 dhd_bus_handle_mb_data(bus
, D2H_DEV_FWHALT
);
7575 /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
7576 DHD_PERIM_LOCK_ALL((bus
->dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7578 dhd_prot_process_ctrlbuf(bus
->dhd
);
7579 bus
->last_process_ctrlbuf_time
= OSL_LOCALTIME_NS();
7580 /* Unlock to give chance for resp to be handled */
7581 DHD_PERIM_UNLOCK_ALL((bus
->dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7583 /* Do not process rest of ring buf once bus enters low power state */
7584 if (!bus
->use_mailbox
&& (bus
->bus_low_power_state
!= DHD_BUS_NO_LOW_POWER_STATE
)) {
7585 DHD_ERROR(("%s: Bus is in power save state. "
7586 "Skip processing rest of ring buffers.\n", __FUNCTION__
));
7590 DHD_PERIM_LOCK_ALL((bus
->dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7591 /* update the flow ring cpls */
7592 dhd_update_txflowrings(bus
->dhd
);
7593 bus
->last_process_flowring_time
= OSL_LOCALTIME_NS();
7595 /* With heavy TX traffic, we could get a lot of TxStatus
7598 more
|= dhd_prot_process_msgbuf_txcpl(bus
->dhd
, dhd_txbound
);
7599 bus
->last_process_txcpl_time
= OSL_LOCALTIME_NS();
7601 /* With heavy RX traffic, this routine potentially could spend some time
7602 * processing RX frames without RX bound
7604 more
|= dhd_prot_process_msgbuf_rxcpl(bus
->dhd
, dhd_rxbound
);
7605 bus
->last_process_rxcpl_time
= OSL_LOCALTIME_NS();
7607 /* Process info ring completion messages */
7608 more
|= dhd_prot_process_msgbuf_infocpl(bus
->dhd
, DHD_INFORING_BOUND
);
7609 bus
->last_process_infocpl_time
= OSL_LOCALTIME_NS();
7611 #ifdef IDLE_TX_FLOW_MGMT
7612 if (bus
->enable_idle_flowring_mgmt
) {
7613 /* Look for idle flow rings */
7614 dhd_bus_check_idle_scan(bus
);
7616 #endif /* IDLE_TX_FLOW_MGMT */
7618 /* don't talk to the dongle if fw is about to be reloaded */
7619 if (bus
->dhd
->hang_was_sent
) {
7622 DHD_PERIM_UNLOCK_ALL((bus
->dhd
->fwder_unit
% FWDER_MAX_UNIT
));
7624 #ifdef SUPPORT_LINKDOWN_RECOVERY
/* A failed shared-memory read may mean the PCIe link dropped: re-read
 * intstatus to distinguish "SHM read failed, link alive" (take memdump)
 * from a true link-down (mark link down), then report a hang either way.
 */
7625 if (bus
->read_shm_fail
) {
7626 /* Read interrupt state once again to confirm linkdown */
7627 int intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
7628 bus
->pcie_mailbox_int
, 0, 0);
7629 if (intstatus
!= (uint32
)-1) {
7630 DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__
));
7631 #ifdef DHD_FW_COREDUMP
7632 if (bus
->dhd
->memdump_enabled
) {
7633 DHD_OS_WAKE_LOCK(bus
->dhd
);
7634 bus
->dhd
->memdump_type
= DUMP_TYPE_READ_SHM_FAIL
;
7635 dhd_bus_mem_dump(bus
->dhd
);
7636 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
7638 #endif /* DHD_FW_COREDUMP */
7640 DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__
));
7641 #ifdef CONFIG_ARCH_MSM
7642 bus
->no_cfg_restore
= 1;
7643 #endif /* CONFIG_ARCH_MSM */
7644 bus
->is_linkdown
= 1;
7647 dhd_prot_debug_info_print(bus
->dhd
);
7648 bus
->dhd
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN
;
7649 dhd_os_send_hang_message(bus
->dhd
);
7652 #endif /* SUPPORT_LINKDOWN_RECOVERY */
/*
 * Sanity-check the dongle TCM: read the pciedev_shared address from the
 * last word of dongle RAM, validate its range, read the pciedev_shared_t
 * structure back, and compare its console_addr against the cached copy in
 * bus->pcie_sh.  Used to detect a corrupted/unreadable shared area.
 *
 * NOTE(review): extraction dropped some lines (return type - presumably
 * bool - declarations of `addr`, `shaddr`, `rv`, the upper-bound part of
 * the address range check, and the return statements) - confirm against
 * the full file.
 */
7657 dhdpcie_tcm_valid(dhd_bus_t
*bus
)
7662 pciedev_shared_t sh
;
7664 shaddr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
7666 /* Read last word in memory to determine address of pciedev_shared structure */
7667 addr
= LTOH32(dhdpcie_bus_rtcm32(bus
, shaddr
));
/* Reject null / nvram-checksum / out-of-RAM values. */
7669 if ((addr
== 0) || (addr
== bus
->nvram_csm
) || (addr
< bus
->dongle_ram_base
) ||
7671 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
7672 __FUNCTION__
, addr
));
7676 /* Read hndrte_shared structure */
7677 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)&sh
,
7678 sizeof(pciedev_shared_t
))) < 0) {
7679 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv
));
7683 /* Compare any field in pciedev_shared_t */
7684 if (sh
.console_addr
!= bus
->pcie_sh
->console_addr
) {
7685 DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
7693 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version
, uint32 host_api_version
)
7695 snprintf(bus_api_revision
, BUS_API_REV_STR_LEN
, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
7696 firmware_api_version
, host_api_version
);
7701 dhdpcie_check_firmware_compatible(uint32 firmware_api_version
, uint32 host_api_version
)
7703 bool retcode
= FALSE
;
7705 DHD_INFO(("firmware api revision %d, host api revision %d\n",
7706 firmware_api_version
, host_api_version
));
7708 switch (firmware_api_version
) {
7709 case PCIE_SHARED_VERSION_7
:
7710 case PCIE_SHARED_VERSION_6
:
7711 case PCIE_SHARED_VERSION_5
:
7715 if (firmware_api_version
<= host_api_version
)
7722 dhdpcie_readshared(dhd_bus_t
*bus
)
7725 int rv
, dma_indx_wr_buf
, dma_indx_rd_buf
;
7727 pciedev_shared_t
*sh
= bus
->pcie_sh
;
7729 bool idma_en
= FALSE
;
7731 if (MULTIBP_ENAB(bus
->sih
)) {
7732 dhd_bus_pcie_pwr_req(bus
);
7735 shaddr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
7736 /* start a timer for 5 seconds */
7737 dhd_timeout_start(&tmo
, MAX_READ_TIMEOUT
);
7739 while (((addr
== 0) || (addr
== bus
->nvram_csm
)) && !dhd_timeout_expired(&tmo
)) {
7740 /* Read last word in memory to determine address of pciedev_shared structure */
7741 addr
= LTOH32(dhdpcie_bus_rtcm32(bus
, shaddr
));
7744 if (addr
== (uint32
)-1) {
7745 DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__
));
7746 #ifdef SUPPORT_LINKDOWN_RECOVERY
7747 #ifdef CONFIG_ARCH_MSM
7748 bus
->no_cfg_restore
= 1;
7749 #endif /* CONFIG_ARCH_MSM */
7750 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7751 bus
->is_linkdown
= 1;
7755 if ((addr
== 0) || (addr
== bus
->nvram_csm
) || (addr
< bus
->dongle_ram_base
) ||
7757 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
7758 __FUNCTION__
, addr
));
7759 DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo
.elapsed
));
7760 #ifdef DEBUG_DNGL_INIT_FAIL
7761 #ifdef CUSTOMER_HW4_DEBUG
7762 bus
->dhd
->memdump_enabled
= DUMP_MEMFILE_BUGON
;
7763 #endif /* CUSTOMER_HW4_DEBUG */
7764 bus
->dhd
->memdump_type
= DUMP_TYPE_DONGLE_INIT_FAILURE
;
7765 dhdpcie_mem_dump(bus
);
7766 #endif /* DEBUG_DNGL_INIT_FAIL */
7769 bus
->shared_addr
= (ulong
)addr
;
7770 DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
7771 "before dongle is ready\n", addr
, tmo
.elapsed
));
7774 /* Read hndrte_shared structure */
7775 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)sh
,
7776 sizeof(pciedev_shared_t
))) < 0) {
7777 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv
));
7782 sh
->flags
= ltoh32(sh
->flags
);
7783 sh
->trap_addr
= ltoh32(sh
->trap_addr
);
7784 sh
->assert_exp_addr
= ltoh32(sh
->assert_exp_addr
);
7785 sh
->assert_file_addr
= ltoh32(sh
->assert_file_addr
);
7786 sh
->assert_line
= ltoh32(sh
->assert_line
);
7787 sh
->console_addr
= ltoh32(sh
->console_addr
);
7788 sh
->msgtrace_addr
= ltoh32(sh
->msgtrace_addr
);
7789 sh
->dma_rxoffset
= ltoh32(sh
->dma_rxoffset
);
7790 sh
->rings_info_ptr
= ltoh32(sh
->rings_info_ptr
);
7791 sh
->flags2
= ltoh32(sh
->flags2
);
7793 /* load bus console address */
7794 bus
->console_addr
= sh
->console_addr
;
7796 /* Read the dma rx offset */
7797 bus
->dma_rxoffset
= bus
->pcie_sh
->dma_rxoffset
;
7798 dhd_prot_rx_dataoffset(bus
->dhd
, bus
->dma_rxoffset
);
7800 DHD_INFO(("DMA RX offset from shared Area %d\n", bus
->dma_rxoffset
));
7802 bus
->api
.fw_rev
= sh
->flags
& PCIE_SHARED_VERSION_MASK
;
7803 if (!(dhdpcie_check_firmware_compatible(bus
->api
.fw_rev
, PCIE_SHARED_VERSION
)))
7805 DHD_ERROR(("%s: pcie_shared version %d in dhd "
7806 "is older than pciedev_shared version %d in dongle\n",
7807 __FUNCTION__
, PCIE_SHARED_VERSION
,
7811 dhdpcie_update_bus_api_revisions(bus
->api
.fw_rev
, PCIE_SHARED_VERSION
);
7813 bus
->rw_index_sz
= (sh
->flags
& PCIE_SHARED_2BYTE_INDICES
) ?
7814 sizeof(uint16
) : sizeof(uint32
);
7815 DHD_INFO(("%s: Dongle advertizes %d size indices\n",
7816 __FUNCTION__
, bus
->rw_index_sz
));
7818 #ifdef IDLE_TX_FLOW_MGMT
7819 if (sh
->flags
& PCIE_SHARED_IDLE_FLOW_RING
) {
7820 DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
7822 bus
->enable_idle_flowring_mgmt
= TRUE
;
7824 #endif /* IDLE_TX_FLOW_MGMT */
7826 if (IDMA_CAPABLE(bus
)) {
7827 if (bus
->sih
->buscorerev
== 23) {
7834 bus
->dhd
->idma_enable
= (sh
->flags
& PCIE_SHARED_IDMA
) ? TRUE
: FALSE
;
7835 bus
->dhd
->ifrm_enable
= (sh
->flags
& PCIE_SHARED_IFRM
) ? TRUE
: FALSE
;
7838 bus
->dhd
->d2h_sync_mode
= sh
->flags
& PCIE_SHARED_D2H_SYNC_MODE_MASK
;
7840 bus
->dhd
->dar_enable
= (sh
->flags
& PCIE_SHARED_DAR
) ? TRUE
: FALSE
;
7842 /* Does the FW support DMA'ing r/w indices */
7843 if (sh
->flags
& PCIE_SHARED_DMA_INDEX
) {
7844 if (!bus
->dhd
->dma_ring_upd_overwrite
) {
7846 if (!IFRM_ENAB(bus
->dhd
)) {
7847 bus
->dhd
->dma_h2d_ring_upd_support
= TRUE
;
7849 bus
->dhd
->dma_d2h_ring_upd_support
= TRUE
;
7853 if (bus
->dhd
->dma_d2h_ring_upd_support
)
7854 bus
->dhd
->d2h_sync_mode
= 0;
7856 DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
7858 (bus
->dhd
->dma_h2d_ring_upd_support
? 1 : 0),
7859 (bus
->dhd
->dma_d2h_ring_upd_support
? 1 : 0)));
7860 } else if (!(sh
->flags
& PCIE_SHARED_D2H_SYNC_MODE_MASK
)) {
7861 DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
7863 return BCME_UNSUPPORTED
;
7865 bus
->dhd
->dma_h2d_ring_upd_support
= FALSE
;
7866 bus
->dhd
->dma_d2h_ring_upd_support
= FALSE
;
7869 /* Does the firmware support fast delete ring? */
7870 if (sh
->flags2
& PCIE_SHARED2_FAST_DELETE_RING
) {
7871 DHD_INFO(("%s: Firmware supports fast delete ring\n",
7873 bus
->dhd
->fast_delete_ring_support
= TRUE
;
7875 DHD_INFO(("%s: Firmware does not support fast delete ring\n",
7877 bus
->dhd
->fast_delete_ring_support
= FALSE
;
7880 /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
7882 ring_info_t ring_info
;
7884 /* boundary check */
7885 if ((sh
->rings_info_ptr
< bus
->dongle_ram_base
) || (sh
->rings_info_ptr
> shaddr
)) {
7886 DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
7887 __FUNCTION__
, sh
->rings_info_ptr
));
7891 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, sh
->rings_info_ptr
,
7892 (uint8
*)&ring_info
, sizeof(ring_info_t
))) < 0)
7895 bus
->h2d_mb_data_ptr_addr
= ltoh32(sh
->h2d_mb_data_ptr
);
7896 bus
->d2h_mb_data_ptr_addr
= ltoh32(sh
->d2h_mb_data_ptr
);
7898 if (bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
) {
7899 bus
->max_tx_flowrings
= ltoh16(ring_info
.max_tx_flowrings
);
7900 bus
->max_submission_rings
= ltoh16(ring_info
.max_submission_queues
);
7901 bus
->max_completion_rings
= ltoh16(ring_info
.max_completion_rings
);
7902 bus
->max_cmn_rings
= bus
->max_submission_rings
- bus
->max_tx_flowrings
;
7903 bus
->api
.handle_mb_data
= dhdpcie_read_handle_mb_data
;
7904 bus
->use_mailbox
= sh
->flags
& PCIE_SHARED_USE_MAILBOX
;
7907 bus
->max_tx_flowrings
= ltoh16(ring_info
.max_tx_flowrings
);
7908 bus
->max_submission_rings
= bus
->max_tx_flowrings
;
7909 bus
->max_completion_rings
= BCMPCIE_D2H_COMMON_MSGRINGS
;
7910 bus
->max_cmn_rings
= BCMPCIE_H2D_COMMON_MSGRINGS
;
7911 bus
->api
.handle_mb_data
= dhdpcie_handle_mb_data
;
7912 bus
->use_mailbox
= TRUE
;
7914 if (bus
->max_completion_rings
== 0) {
7915 DHD_ERROR(("dongle completion rings are invalid %d\n",
7916 bus
->max_completion_rings
));
7919 if (bus
->max_submission_rings
== 0) {
7920 DHD_ERROR(("dongle submission rings are invalid %d\n",
7921 bus
->max_submission_rings
));
7924 if (bus
->max_tx_flowrings
== 0) {
7925 DHD_ERROR(("dongle txflow rings are invalid %d\n", bus
->max_tx_flowrings
));
7929 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
7930 * The max_sub_queues is read from FW initialized ring_info
7932 if (bus
->dhd
->dma_h2d_ring_upd_support
|| IDMA_ENAB(bus
->dhd
)) {
7933 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7934 H2D_DMA_INDX_WR_BUF
, bus
->max_submission_rings
);
7935 dma_indx_rd_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7936 D2H_DMA_INDX_RD_BUF
, bus
->max_completion_rings
);
7938 if ((dma_indx_wr_buf
!= BCME_OK
) || (dma_indx_rd_buf
!= BCME_OK
)) {
7939 DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
7940 "Host will use w/r indices in TCM\n",
7942 bus
->dhd
->dma_h2d_ring_upd_support
= FALSE
;
7943 bus
->dhd
->idma_enable
= FALSE
;
7947 if (bus
->dhd
->dma_d2h_ring_upd_support
) {
7948 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7949 D2H_DMA_INDX_WR_BUF
, bus
->max_completion_rings
);
7950 dma_indx_rd_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7951 H2D_DMA_INDX_RD_BUF
, bus
->max_submission_rings
);
7953 if ((dma_indx_wr_buf
!= BCME_OK
) || (dma_indx_rd_buf
!= BCME_OK
)) {
7954 DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
7955 "Host will use w/r indices in TCM\n",
7957 bus
->dhd
->dma_d2h_ring_upd_support
= FALSE
;
7961 if (IFRM_ENAB(bus
->dhd
)) {
7962 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7963 H2D_IFRM_INDX_WR_BUF
, bus
->max_tx_flowrings
);
7965 if (dma_indx_wr_buf
!= BCME_OK
) {
7966 DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
7968 bus
->dhd
->ifrm_enable
= FALSE
;
7972 /* read ringmem and ringstate ptrs from shared area and store in host variables */
7973 dhd_fillup_ring_sharedptr_info(bus
, &ring_info
);
7974 if (dhd_msg_level
& DHD_INFO_VAL
) {
7975 bcm_print_bytes("ring_info_raw", (uchar
*)&ring_info
, sizeof(ring_info_t
));
7977 DHD_INFO(("ring_info\n"));
7979 DHD_ERROR(("%s: max H2D queues %d\n",
7980 __FUNCTION__
, ltoh16(ring_info
.max_tx_flowrings
)));
7982 DHD_INFO(("mail box address\n"));
7983 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
7984 __FUNCTION__
, bus
->h2d_mb_data_ptr_addr
));
7985 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
7986 __FUNCTION__
, bus
->d2h_mb_data_ptr_addr
));
7989 DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
7990 __FUNCTION__
, bus
->dhd
->d2h_sync_mode
));
7992 bus
->dhd
->d2h_hostrdy_supported
=
7993 ((sh
->flags
& PCIE_SHARED_HOSTRDY_SUPPORT
) == PCIE_SHARED_HOSTRDY_SUPPORT
);
7995 bus
->dhd
->ext_trap_data_supported
=
7996 ((sh
->flags2
& PCIE_SHARED2_EXTENDED_TRAP_DATA
) == PCIE_SHARED2_EXTENDED_TRAP_DATA
);
7998 if ((sh
->flags2
& PCIE_SHARED2_TXSTATUS_METADATA
) == 0)
7999 bus
->dhd
->pcie_txs_metadata_enable
= 0;
8002 bus
->d2h_minidump
= (sh
->flags2
& PCIE_SHARED2_FW_SMALL_MEMDUMP
) ? TRUE
: FALSE
;
8003 DHD_ERROR(("FW supports minidump ? %s \n", bus
->d2h_minidump
? "Y" : "N"));
8004 if (bus
->d2h_minidump_override
) {
8005 bus
->d2h_minidump
= FALSE
;
8007 DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
8008 bus
->d2h_minidump
, bus
->d2h_minidump_override
));
8009 #endif /* D2H_MINIDUMP */
8011 if (MULTIBP_ENAB(bus
->sih
)) {
8012 dhd_bus_pcie_pwr_req_clear(bus
);
8015 } /* dhdpcie_readshared */
8017 /** Read ring mem and ring state ptr info from shared memory area in device memory */
/* NOTE(review): this view of the file is damaged by extraction -- the return
 * type line and the declarations of the locals i, j and tcm_memloc (all used
 * below) are missing, as are several closing braces. Comments added here
 * describe only what the visible statements do.
 */
8019 dhd_fillup_ring_sharedptr_info(dhd_bus_t
*bus
, ring_info_t
*ring_info
)
/* TCM addresses of the per-ring write/read index words, parsed from the
 * firmware-provided ring_info structure (little-endian on the wire).
 */
8024 uint32 d2h_w_idx_ptr
, d2h_r_idx_ptr
, h2d_w_idx_ptr
, h2d_r_idx_ptr
;
8025 uint16 max_tx_flowrings
= bus
->max_tx_flowrings
;
8027 /* Ring mem ptr info */
8028 /* Alloated in the order
8029 H2D_MSGRING_CONTROL_SUBMIT 0
8030 H2D_MSGRING_RXPOST_SUBMIT 1
8031 D2H_MSGRING_CONTROL_COMPLETE 2
8032 D2H_MSGRING_TX_COMPLETE 3
8033 D2H_MSGRING_RX_COMPLETE 4
8037 /* ringmemptr holds start of the mem block address space */
8038 tcm_memloc
= ltoh32(ring_info
->ringmem_ptr
);
8040 /* Find out ringmem ptr for each ring common ring */
/* Common rings occupy consecutive ring_mem_t slots starting at tcm_memloc. */
8041 for (i
= 0; i
<= BCMPCIE_COMMON_MSGRING_MAX_ID
; i
++) {
8042 bus
->ring_sh
[i
].ring_mem_addr
= tcm_memloc
;
8043 /* Update mem block */
8044 tcm_memloc
= tcm_memloc
+ sizeof(ring_mem_t
);
8045 DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
8046 i
, bus
->ring_sh
[i
].ring_mem_addr
));
8050 /* Ring state mem ptr info */
8052 d2h_w_idx_ptr
= ltoh32(ring_info
->d2h_w_idx_ptr
);
8053 d2h_r_idx_ptr
= ltoh32(ring_info
->d2h_r_idx_ptr
);
8054 h2d_w_idx_ptr
= ltoh32(ring_info
->h2d_w_idx_ptr
);
8055 h2d_r_idx_ptr
= ltoh32(ring_info
->h2d_r_idx_ptr
);
8057 /* Store h2d common ring write/read pointers */
/* Each ring's index word is rw_index_sz bytes after the previous one. */
8058 for (i
= 0; i
< BCMPCIE_H2D_COMMON_MSGRINGS
; i
++) {
8059 bus
->ring_sh
[i
].ring_state_w
= h2d_w_idx_ptr
;
8060 bus
->ring_sh
[i
].ring_state_r
= h2d_r_idx_ptr
;
8062 /* update mem block */
8063 h2d_w_idx_ptr
= h2d_w_idx_ptr
+ bus
->rw_index_sz
;
8064 h2d_r_idx_ptr
= h2d_r_idx_ptr
+ bus
->rw_index_sz
;
8066 DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i
,
8067 bus
->ring_sh
[i
].ring_state_w
, bus
->ring_sh
[i
].ring_state_r
));
8070 /* Store d2h common ring write/read pointers */
/* i continues past the H2D common rings; d2h entries follow them in ring_sh. */
8071 for (j
= 0; j
< BCMPCIE_D2H_COMMON_MSGRINGS
; j
++, i
++) {
8072 bus
->ring_sh
[i
].ring_state_w
= d2h_w_idx_ptr
;
8073 bus
->ring_sh
[i
].ring_state_r
= d2h_r_idx_ptr
;
8075 /* update mem block */
8076 d2h_w_idx_ptr
= d2h_w_idx_ptr
+ bus
->rw_index_sz
;
8077 d2h_r_idx_ptr
= d2h_r_idx_ptr
+ bus
->rw_index_sz
;
8079 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i
,
8080 bus
->ring_sh
[i
].ring_state_w
, bus
->ring_sh
[i
].ring_state_r
));
8083 /* Store txflow ring write/read pointers */
/* NOTE(review): pre-rev6 shared-area firmware appears to report a flow ring
 * count that includes the H2D common rings, hence the subtraction; newer
 * firmware instead needs +1 for the debug-info ring -- confirm against the
 * firmware shared-structure definition.
 */
8084 if (bus
->api
.fw_rev
< PCIE_SHARED_VERSION_6
) {
8085 max_tx_flowrings
-= BCMPCIE_H2D_COMMON_MSGRINGS
;
8087 /* Account for Debug info h2d ring located after the last tx flow ring */
8088 max_tx_flowrings
= max_tx_flowrings
+ 1;
8090 for (j
= 0; j
< max_tx_flowrings
; i
++, j
++)
8092 bus
->ring_sh
[i
].ring_state_w
= h2d_w_idx_ptr
;
8093 bus
->ring_sh
[i
].ring_state_r
= h2d_r_idx_ptr
;
8095 /* update mem block */
8096 h2d_w_idx_ptr
= h2d_w_idx_ptr
+ bus
->rw_index_sz
;
8097 h2d_r_idx_ptr
= h2d_r_idx_ptr
+ bus
->rw_index_sz
;
8099 DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i
,
8100 bus
->ring_sh
[i
].ring_state_w
,
8101 bus
->ring_sh
[i
].ring_state_r
));
8103 /* store wr/rd pointers for debug info completion ring */
8104 bus
->ring_sh
[i
].ring_state_w
= d2h_w_idx_ptr
;
8105 bus
->ring_sh
[i
].ring_state_r
= d2h_r_idx_ptr
;
8106 d2h_w_idx_ptr
= d2h_w_idx_ptr
+ bus
->rw_index_sz
;
8107 d2h_r_idx_ptr
= d2h_r_idx_ptr
+ bus
->rw_index_sz
;
8108 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i
,
8109 bus
->ring_sh
[i
].ring_state_w
, bus
->ring_sh
[i
].ring_state_r
));
8111 } /* dhd_fillup_ring_sharedptr_info */
8114 * Initialize bus module: prepare for communication with the dongle. Called after downloading
8115 * firmware into the dongle.
/* NOTE(review): extraction damage -- the declaration of the local `ret`, the
 * error/return paths (e.g. after the dhdpcie_readshared failure message) and
 * the function's closing lines are missing from this view.
 */
8117 int dhd_bus_init(dhd_pub_t
*dhdp
, bool enforce_mutex
)
8119 dhd_bus_t
*bus
= dhdp
->bus
;
8122 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
/* buscorerev 66 needs a power-request workaround (see the _war helper). */
8128 if (bus
->sih
->buscorerev
== 66) {
8129 dhd_bus_pcie_pwr_req_clear_reload_war(bus
);
8132 if (MULTIBP_ENAB(bus
->sih
)) {
8133 dhd_bus_pcie_pwr_req(bus
);
8136 /* Configure AER registers to log the TLP header */
8137 dhd_bus_aer_config(bus
);
8139 /* Make sure we're talking to the core. */
8140 bus
->reg
= si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
8141 ASSERT(bus
->reg
!= NULL
);
8143 /* before opening up bus for data transfer, check if shared are is intact */
8144 ret
= dhdpcie_readshared(bus
);
8146 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__
));
8150 /* Make sure we're talking to the core. */
8151 bus
->reg
= si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
8152 ASSERT(bus
->reg
!= NULL
);
8154 dhd_init_bus_lock(bus
);
8156 /* Set bus state according to enable result */
8157 dhdp
->busstate
= DHD_BUS_DATA
;
8158 bus
->bus_low_power_state
= DHD_BUS_NO_LOW_POWER_STATE
;
8159 dhdp
->dhd_bus_busy_state
= 0;
8161 /* D11 status via PCIe completion header */
8162 if ((ret
= dhdpcie_init_d11status(bus
)) < 0) {
8166 if (!dhd_download_fw_on_driverload
)
8167 dhd_dpc_enable(bus
->dhd
);
8168 /* Enable the interrupt after device is up */
8169 dhdpcie_bus_intr_enable(bus
);
8171 bus
->intr_enabled
= TRUE
;
8173 /* bcmsdh_intr_unmask(bus->sdh); */
8174 #ifdef DHD_PCIE_RUNTIMEPM
8176 bus
->idletime
= (int32
)MAX_IDLE_COUNT
;
8177 init_waitqueue_head(&bus
->rpm_queue
);
8178 mutex_init(&bus
->pm_lock
);
8181 #endif /* DHD_PCIE_RUNTIMEPM */
8183 /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
8184 if (bus
->api
.fw_rev
< PCIE_SHARED_VERSION_6
) {
8185 bus
->use_d0_inform
= TRUE
;
8187 bus
->use_d0_inform
= FALSE
;
/* Drop the power request taken at function entry for multi-backplane chips. */
8191 if (MULTIBP_ENAB(bus
->sih
)) {
8192 dhd_bus_pcie_pwr_req_clear(bus
);
/* Write a value to the last word of dongle RAM (where the shared-area pointer
 * lives). NOTE(review): the return type line and the declarations/initial
 * values of the locals `addr` and `val` are missing from this damaged view,
 * so the value written cannot be confirmed here.
 */
8198 dhdpcie_init_shared_addr(dhd_bus_t
*bus
)
/* Last 4 bytes of dongle RAM hold the shared-area address word. */
8202 addr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
8203 #ifdef DHD_PCIE_RUNTIMEPM
8204 dhdpcie_runtime_bus_wake(bus
->dhd
, TRUE
, __builtin_return_address(0));
8205 #endif /* DHD_PCIE_RUNTIMEPM */
8206 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
));
/* Decide whether the PCI (vendor, device) pair identifies a Broadcom chip this
 * driver supports. NOTE(review): the return type line, the per-match return
 * statements between the if-blocks, and the closing braces are missing from
 * this damaged view (original line numbers jump, e.g. 8220 -> 8224); only the
 * device-ID comparisons themselves are visible.
 */
8210 dhdpcie_chipmatch(uint16 vendor
, uint16 device
)
/* Only Broadcom vendor IDs are accepted at all. */
8212 if (vendor
!= PCI_VENDOR_ID_BROADCOM
) {
8213 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__
,
/* One block per supported chip family: D11AC/D11AX dual-band, 2G-only,
 * 5G-only and raw chip-ID aliases are all matched.
 */
8218 if ((device
== BCM4350_D11AC_ID
) || (device
== BCM4350_D11AC2G_ID
) ||
8219 (device
== BCM4350_D11AC5G_ID
) || (device
== BCM4350_CHIP_ID
) ||
8220 (device
== BCM43569_CHIP_ID
)) {
8224 if ((device
== BCM4354_D11AC_ID
) || (device
== BCM4354_D11AC2G_ID
) ||
8225 (device
== BCM4354_D11AC5G_ID
) || (device
== BCM4354_CHIP_ID
)) {
8229 if ((device
== BCM4356_D11AC_ID
) || (device
== BCM4356_D11AC2G_ID
) ||
8230 (device
== BCM4356_D11AC5G_ID
) || (device
== BCM4356_CHIP_ID
)) {
8234 if ((device
== BCM4371_D11AC_ID
) || (device
== BCM4371_D11AC2G_ID
) ||
8235 (device
== BCM4371_D11AC5G_ID
) || (device
== BCM4371_CHIP_ID
)) {
8239 if ((device
== BCM4345_D11AC_ID
) || (device
== BCM4345_D11AC2G_ID
) ||
8240 (device
== BCM4345_D11AC5G_ID
) || BCM4345_CHIP(device
)) {
8244 if ((device
== BCM43452_D11AC_ID
) || (device
== BCM43452_D11AC2G_ID
) ||
8245 (device
== BCM43452_D11AC5G_ID
)) {
8249 if ((device
== BCM4335_D11AC_ID
) || (device
== BCM4335_D11AC2G_ID
) ||
8250 (device
== BCM4335_D11AC5G_ID
) || (device
== BCM4335_CHIP_ID
)) {
8254 if ((device
== BCM43602_D11AC_ID
) || (device
== BCM43602_D11AC2G_ID
) ||
8255 (device
== BCM43602_D11AC5G_ID
) || (device
== BCM43602_CHIP_ID
)) {
8259 if ((device
== BCM43569_D11AC_ID
) || (device
== BCM43569_D11AC2G_ID
) ||
8260 (device
== BCM43569_D11AC5G_ID
) || (device
== BCM43569_CHIP_ID
)) {
8264 if ((device
== BCM4358_D11AC_ID
) || (device
== BCM4358_D11AC2G_ID
) ||
8265 (device
== BCM4358_D11AC5G_ID
)) {
8269 if ((device
== BCM4349_D11AC_ID
) || (device
== BCM4349_D11AC2G_ID
) ||
8270 (device
== BCM4349_D11AC5G_ID
) || (device
== BCM4349_CHIP_ID
)) {
8274 if ((device
== BCM4355_D11AC_ID
) || (device
== BCM4355_D11AC2G_ID
) ||
8275 (device
== BCM4355_D11AC5G_ID
) || (device
== BCM4355_CHIP_ID
)) {
8279 if ((device
== BCM4359_D11AC_ID
) || (device
== BCM4359_D11AC2G_ID
) ||
8280 (device
== BCM4359_D11AC5G_ID
)) {
8284 if ((device
== BCM43596_D11AC_ID
) || (device
== BCM43596_D11AC2G_ID
) ||
8285 (device
== BCM43596_D11AC5G_ID
)) {
8289 if ((device
== BCM43597_D11AC_ID
) || (device
== BCM43597_D11AC2G_ID
) ||
8290 (device
== BCM43597_D11AC5G_ID
)) {
8294 if ((device
== BCM4364_D11AC_ID
) || (device
== BCM4364_D11AC2G_ID
) ||
8295 (device
== BCM4364_D11AC5G_ID
) || (device
== BCM4364_CHIP_ID
)) {
8299 if ((device
== BCM4361_D11AC_ID
) || (device
== BCM4361_D11AC2G_ID
) ||
8300 (device
== BCM4361_D11AC5G_ID
) || (device
== BCM4361_CHIP_ID
)) {
8303 if ((device
== BCM4347_D11AC_ID
) || (device
== BCM4347_D11AC2G_ID
) ||
8304 (device
== BCM4347_D11AC5G_ID
) || (device
== BCM4347_CHIP_ID
)) {
8308 if ((device
== BCM4365_D11AC_ID
) || (device
== BCM4365_D11AC2G_ID
) ||
8309 (device
== BCM4365_D11AC5G_ID
) || (device
== BCM4365_CHIP_ID
)) {
8313 if ((device
== BCM4366_D11AC_ID
) || (device
== BCM4366_D11AC2G_ID
) ||
8314 (device
== BCM4366_D11AC5G_ID
) || (device
== BCM4366_CHIP_ID
) ||
8315 (device
== BCM43664_CHIP_ID
) || (device
== BCM43666_CHIP_ID
)) {
8319 if ((device
== BCM4369_D11AX_ID
) || (device
== BCM4369_D11AX2G_ID
) ||
8320 (device
== BCM4369_D11AX5G_ID
) || (device
== BCM4369_CHIP_ID
)) {
8324 if ((device
== BCM4375_D11AX_ID
) || (device
== BCM4375_D11AX2G_ID
) ||
8325 (device
== BCM4375_D11AX5G_ID
) || (device
== BCM4375_CHIP_ID
)) {
/* Fell through every family: Broadcom vendor but unknown device ID. */
8329 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__
, vendor
, device
));
8331 } /* dhdpcie_chipmatch */
8334 * Name: dhdpcie_cc_nvmshadow
8337 * A shadow of OTP/SPROM exists in ChipCommon Region
8338 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
8339 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
8340 * can also be read from ChipCommon Registers.
/* NOTE(review): extraction damage -- the return type line, the declarations
 * of cur_coreid and chipc_corerev, the dump_offset increment inside the dump
 * loop, the final return value and several closing braces are missing from
 * this view. Comments below describe only the visible statements.
 */
8343 dhdpcie_cc_nvmshadow(dhd_bus_t
*bus
, struct bcmstrbuf
*b
)
8345 uint16 dump_offset
= 0;
8346 uint32 dump_size
= 0, otp_size
= 0, sprom_size
= 0;
8348 /* Table for 65nm OTP Size (in bits) */
8349 int otp_size_65nm
[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
8351 volatile uint16
*nvm_shadow
;
8355 chipcregs_t
*chipcregs
;
8357 /* Save the current core */
8358 cur_coreid
= si_coreid(bus
->sih
);
8359 /* Switch to ChipC */
8360 chipcregs
= (chipcregs_t
*)si_setcore(bus
->sih
, CC_CORE_ID
, 0);
8361 ASSERT(chipcregs
!= NULL
);
8363 chipc_corerev
= si_corerev(bus
->sih
);
8365 /* Check ChipcommonCore Rev */
8366 if (chipc_corerev
< 44) {
8367 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__
, chipc_corerev
));
8368 return BCME_UNSUPPORTED
;
/* Only a fixed whitelist of chips is supported by this dump command. */
8372 if (((uint16
)bus
->sih
->chip
!= BCM4350_CHIP_ID
) && !BCM4345_CHIP((uint16
)bus
->sih
->chip
) &&
8373 ((uint16
)bus
->sih
->chip
!= BCM4355_CHIP_ID
) &&
8374 ((uint16
)bus
->sih
->chip
!= BCM4364_CHIP_ID
)) {
8375 DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
8376 "4350/4345/4355/4364 only\n", __FUNCTION__
));
8377 return BCME_UNSUPPORTED
;
8380 /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
8381 if (chipcregs
->sromcontrol
& SRC_PRESENT
) {
8382 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
8383 sprom_size
= (1 << (2 * ((chipcregs
->sromcontrol
& SRC_SIZE_MASK
)
8384 >> SRC_SIZE_SHIFT
))) * 1024;
8385 bcm_bprintf(b
, "\nSPROM Present (Size %d bits)\n", sprom_size
);
8388 if (chipcregs
->sromcontrol
& SRC_OTPPRESENT
) {
8389 bcm_bprintf(b
, "\nOTP Present");
/* OTP size decoding depends on the process node reported in otplayout. */
8391 if (((chipcregs
->otplayout
& OTPL_WRAP_TYPE_MASK
) >> OTPL_WRAP_TYPE_SHIFT
)
8392 == OTPL_WRAP_TYPE_40NM
) {
8393 /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
8394 /* Chipcommon rev51 is a variation on rev45 and does not support
8395 * the latest OTP configuration.
8397 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
8398 otp_size
= (((chipcregs
->otplayout
& OTPL_ROW_SIZE_MASK
)
8399 >> OTPL_ROW_SIZE_SHIFT
) + 1) * 1024;
8400 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
8402 otp_size
= (((chipcregs
->capabilities
& CC_CAP_OTPSIZE
)
8403 >> CC_CAP_OTPSIZE_SHIFT
) + 1) * 1024;
8404 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
8407 /* This part is untested since newer chips have 40nm OTP */
8408 /* Chipcommon rev51 is a variation on rev45 and does not support
8409 * the latest OTP configuration.
8411 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
8412 otp_size
= otp_size_65nm
[(chipcregs
->otplayout
& OTPL_ROW_SIZE_MASK
)
8413 >> OTPL_ROW_SIZE_SHIFT
];
8414 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
8416 otp_size
= otp_size_65nm
[(chipcregs
->capabilities
& CC_CAP_OTPSIZE
)
8417 >> CC_CAP_OTPSIZE_SHIFT
];
8418 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
8419 DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
8425 /* Chipcommon rev51 is a variation on rev45 and does not support
8426 * the latest OTP configuration.
8428 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
/* Neither SPROM nor OTP detected -- nothing to dump. */
8429 if (((chipcregs
->sromcontrol
& SRC_PRESENT
) == 0) &&
8430 ((chipcregs
->otplayout
& OTPL_ROW_SIZE_MASK
) == 0)) {
8431 DHD_ERROR(("%s: SPROM and OTP could not be found "
8432 "sromcontrol = %x, otplayout = %x \n",
8433 __FUNCTION__
, chipcregs
->sromcontrol
, chipcregs
->otplayout
));
8434 return BCME_NOTFOUND
;
8437 if (((chipcregs
->sromcontrol
& SRC_PRESENT
) == 0) &&
8438 ((chipcregs
->capabilities
& CC_CAP_OTPSIZE
) == 0)) {
8439 DHD_ERROR(("%s: SPROM and OTP could not be found "
8440 "sromcontrol = %x, capablities = %x \n",
8441 __FUNCTION__
, chipcregs
->sromcontrol
, chipcregs
->capabilities
));
8442 return BCME_NOTFOUND
;
8446 /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
8447 if ((!(chipcregs
->sromcontrol
& SRC_PRESENT
) || (chipcregs
->sromcontrol
& SRC_OTPSEL
)) &&
8448 (chipcregs
->sromcontrol
& SRC_OTPPRESENT
)) {
8450 bcm_bprintf(b
, "OTP Strap selected.\n"
8451 "\nOTP Shadow in ChipCommon:\n");
8453 dump_size
= otp_size
/ 16 ; /* 16bit words */
8455 } else if (((chipcregs
->sromcontrol
& SRC_OTPSEL
) == 0) &&
8456 (chipcregs
->sromcontrol
& SRC_PRESENT
)) {
8458 bcm_bprintf(b
, "SPROM Strap selected\n"
8459 "\nSPROM Shadow in ChipCommon:\n");
8461 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
8462 /* dump_size in 16bit words */
8463 dump_size
= sprom_size
> 8 ? (8 * 1024) / 16 : sprom_size
/ 16;
8465 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
8467 return BCME_NOTFOUND
;
8470 if (bus
->regs
== NULL
) {
8471 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
8472 return BCME_NOTREADY
;
8474 bcm_bprintf(b
, "\n OffSet:");
8476 /* Chipcommon rev51 is a variation on rev45 and does not support
8477 * the latest OTP configuration.
8479 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
8480 /* Chip common can read only 8kbits,
8481 * for ccrev >= 49 otp size is around 12 kbits so use GCI core
8483 nvm_shadow
= (volatile uint16
*)si_setcore(bus
->sih
, GCI_CORE_ID
, 0);
8485 /* Point to the SPROM/OTP shadow in ChipCommon */
8486 nvm_shadow
= chipcregs
->sromotp
;
8489 if (nvm_shadow
== NULL
) {
8490 DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__
));
8491 return BCME_NOTFOUND
;
8495 * Read 16 bits / iteration.
8496 * dump_size & dump_offset in 16-bit words
8498 while (dump_offset
< dump_size
) {
8499 if (dump_offset
% 2 == 0)
8500 /* Print the offset in the shadow space in Bytes */
8501 bcm_bprintf(b
, "\n 0x%04x", dump_offset
* 2);
8503 bcm_bprintf(b
, "\t0x%04x", *(nvm_shadow
+ dump_offset
));
8508 /* Switch back to the original core */
8509 si_setcore(bus
->sih
, cur_coreid
, 0);
8512 } /* dhdpcie_cc_nvmshadow */
8514 /** Flow rings are dynamically created and destroyed */
/* Tear down one flow ring: drain its queue, mark it closed, unlink it from
 * the active list, and return the ring object and flowid to their pools.
 * NOTE(review): the declaration of the local `pkt` and some closing braces
 * are missing from this damaged view.
 */
8515 void dhd_bus_clean_flow_ring(dhd_bus_t
*bus
, void *node
)
8518 flow_queue_t
*queue
;
8519 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)node
;
8520 unsigned long flags
;
8522 queue
= &flow_ring_node
->queue
;
8524 #ifdef DHDTCPACK_SUPPRESS
8525 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
8526 * when there is a newly coming packet from network stack.
8528 dhd_tcpack_info_tbl_clean(bus
->dhd
);
8529 #endif /* DHDTCPACK_SUPPRESS */
8531 /* clean up BUS level info */
8532 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
8534 /* Flush all pending packets in the queue, if any */
8535 while ((pkt
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
8536 PKTFREE(bus
->dhd
->osh
, pkt
, TRUE
);
8538 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
8540 /* Reinitialise flowring's queue */
8541 dhd_flow_queue_reinit(bus
->dhd
, queue
, FLOW_RING_QUEUE_THRESHOLD
);
8542 flow_ring_node
->status
= FLOW_RING_STATUS_CLOSED
;
8543 flow_ring_node
->active
= FALSE
;
8545 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
8547 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
8548 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
8549 dll_delete(&flow_ring_node
->list
);
8550 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8552 /* Release the flowring object back into the pool */
8553 dhd_prot_flowrings_pool_release(bus
->dhd
,
8554 flow_ring_node
->flowid
, flow_ring_node
->prot_info
);
8556 /* Free the flowid back to the flowid allocator */
8557 dhd_flowid_free(bus
->dhd
, flow_ring_node
->flow_info
.ifindex
,
8558 flow_ring_node
->flowid
);
8562 * Allocate a Flow ring buffer,
8563 * Init Ring buffer, send Msg to device about flow ring creation
/* NOTE(review): the return type line and the return statements for both the
 * success and failure branches are missing from this damaged view.
 */
8566 dhd_bus_flow_ring_create_request(dhd_bus_t
*bus
, void *arg
)
8568 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)arg
;
8570 DHD_INFO(("%s :Flow create\n", __FUNCTION__
));
8572 /* Send Msg to device about flow ring creation */
8573 if (dhd_prot_flow_ring_create(bus
->dhd
, flow_ring_node
) != BCME_OK
)
8579 /** Handle response from dongle on a 'flow ring create' request */
/* On success, marks the ring OPEN, links it into the active list and kicks
 * the queue scheduler; on failure, cleans the ring up. NOTE(review): the
 * return type line, early-return statements after the error logs, and some
 * closing braces are missing from this damaged view.
 */
8581 dhd_bus_flow_ring_create_response(dhd_bus_t
*bus
, uint16 flowid
, int32 status
)
8583 flow_ring_node_t
*flow_ring_node
;
8584 unsigned long flags
;
8586 DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__
, flowid
));
8588 /* Boundary check of the flowid */
8589 if (flowid
>= bus
->dhd
->num_flow_rings
) {
8590 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__
,
8591 flowid
, bus
->dhd
->num_flow_rings
));
8595 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
8596 if (!flow_ring_node
) {
8597 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__
));
/* Defensive cross-check: the looked-up node must carry the same flowid. */
8601 ASSERT(flow_ring_node
->flowid
== flowid
);
8602 if (flow_ring_node
->flowid
!= flowid
) {
8603 DHD_ERROR(("%s: flowid %d is different from the flowid "
8604 "of the flow_ring_node %d\n", __FUNCTION__
, flowid
,
8605 flow_ring_node
->flowid
));
8609 if (status
!= BCME_OK
) {
8610 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
8611 __FUNCTION__
, status
));
8612 /* Call Flow clean up */
8613 dhd_bus_clean_flow_ring(bus
, flow_ring_node
);
8617 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
8618 flow_ring_node
->status
= FLOW_RING_STATUS_OPEN
;
8619 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
8621 /* Now add the Flow ring node into the active list
8622 * Note that this code to add the newly created node to the active
8623 * list was living in dhd_flowid_lookup. But note that after
8624 * adding the node to the active list the contents of node is being
8625 * filled in dhd_prot_flow_ring_create.
8626 * If there is a D2H interrupt after the node gets added to the
8627 * active list and before the node gets populated with values
8628 * from the Bottom half dhd_update_txflowrings would be called.
8629 * which will then try to walk through the active flow ring list,
8630 * pickup the nodes and operate on them. Now note that since
8631 * the function dhd_prot_flow_ring_create is not finished yet
8632 * the contents of flow_ring_node can still be NULL leading to
8633 * crashes. Hence the flow_ring_node should be added to the
8634 * active list only after its truely created, which is after
8635 * receiving the create response message from the Host.
8637 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
8638 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
8639 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8641 dhd_bus_schedule_queue(bus
, flowid
, FALSE
); /* from queue to flowring */
/* Request deletion of a flow ring: mark it DELETE_PENDING, drain its queue,
 * then ask the dongle to delete the ring. NOTE(review): the return type line,
 * the return statements (including after the already-pending early exit) and
 * some closing braces are missing from this damaged view.
 */
8647 dhd_bus_flow_ring_delete_request(dhd_bus_t
*bus
, void *arg
)
8650 flow_queue_t
*queue
;
8651 flow_ring_node_t
*flow_ring_node
;
8652 unsigned long flags
;
8654 DHD_INFO(("%s :Flow Delete\n", __FUNCTION__
));
8656 flow_ring_node
= (flow_ring_node_t
*)arg
;
8658 #ifdef DHDTCPACK_SUPPRESS
8659 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
8660 * when there is a newly coming packet from network stack.
8662 dhd_tcpack_info_tbl_clean(bus
->dhd
);
8663 #endif /* DHDTCPACK_SUPPRESS */
8664 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
/* A delete already in flight: drop the lock and bail out. */
8665 if (flow_ring_node
->status
== FLOW_RING_STATUS_DELETE_PENDING
) {
8666 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
8667 DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__
, flow_ring_node
->flowid
));
8670 flow_ring_node
->status
= FLOW_RING_STATUS_DELETE_PENDING
;
8672 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
8674 /* Flush all pending packets in the queue, if any */
8675 while ((pkt
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
8676 PKTFREE(bus
->dhd
->osh
, pkt
, TRUE
);
8678 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
8680 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
8682 /* Send Msg to device about flow ring deletion */
8683 dhd_prot_flow_ring_delete(bus
->dhd
, flow_ring_node
);
/* Handle the dongle's response to a 'flow ring delete' request: validate the
 * flowid/state and then run the common ring cleanup. NOTE(review): the return
 * type line, early-return statements after the error logs, and some closing
 * braces are missing from this damaged view.
 */
8689 dhd_bus_flow_ring_delete_response(dhd_bus_t
*bus
, uint16 flowid
, uint32 status
)
8691 flow_ring_node_t
*flow_ring_node
;
8693 DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__
, flowid
));
8695 /* Boundary check of the flowid */
8696 if (flowid
>= bus
->dhd
->num_flow_rings
) {
8697 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__
,
8698 flowid
, bus
->dhd
->num_flow_rings
));
8702 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
8703 if (!flow_ring_node
) {
8704 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__
));
8708 ASSERT(flow_ring_node
->flowid
== flowid
);
8709 if (flow_ring_node
->flowid
!= flowid
) {
8710 DHD_ERROR(("%s: flowid %d is different from the flowid "
8711 "of the flow_ring_node %d\n", __FUNCTION__
, flowid
,
8712 flow_ring_node
->flowid
));
8716 if (status
!= BCME_OK
) {
8717 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
8718 __FUNCTION__
, status
));
/* Response must arrive while a delete is pending; anything else is a bug. */
8722 if (flow_ring_node
->status
!= FLOW_RING_STATUS_DELETE_PENDING
) {
8723 DHD_ERROR(("%s: invalid state flowid = %d, status = %d\n",
8724 __FUNCTION__
, flowid
, flow_ring_node
->status
));
8728 /* Call Flow clean up */
8729 dhd_bus_clean_flow_ring(bus
, flow_ring_node
);
/* Request a flush of a flow ring: mark it FLUSH_PENDING, drop all queued
 * packets, then ask the dongle to flush the ring. NOTE(review): the local
 * `pkt` declaration, the return statement and some closing braces are missing
 * from this damaged view.
 */
8735 int dhd_bus_flow_ring_flush_request(dhd_bus_t
*bus
, void *arg
)
8738 flow_queue_t
*queue
;
8739 flow_ring_node_t
*flow_ring_node
;
8740 unsigned long flags
;
8742 DHD_INFO(("%s :Flow Flush\n", __FUNCTION__
));
8744 flow_ring_node
= (flow_ring_node_t
*)arg
;
8746 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
8747 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
8748 /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
8749 * once flow ring flush response is received for this flowring node.
8751 flow_ring_node
->status
= FLOW_RING_STATUS_FLUSH_PENDING
;
8753 #ifdef DHDTCPACK_SUPPRESS
8754 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
8755 * when there is a newly coming packet from network stack.
8757 dhd_tcpack_info_tbl_clean(bus
->dhd
);
8758 #endif /* DHDTCPACK_SUPPRESS */
8760 /* Flush all pending packets in the queue, if any */
8761 while ((pkt
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
8762 PKTFREE(bus
->dhd
->osh
, pkt
, TRUE
);
8764 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
8766 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
8768 /* Send Msg to device about flow ring flush */
8769 dhd_prot_flow_ring_flush(bus
->dhd
, flow_ring_node
);
/* Handle the dongle's response to a 'flow ring flush' request: after
 * validation, reopen the ring (status back to OPEN). NOTE(review): the return
 * type line, early-return statements after the error logs, and some closing
 * braces are missing from this damaged view.
 */
8775 dhd_bus_flow_ring_flush_response(dhd_bus_t
*bus
, uint16 flowid
, uint32 status
)
8777 flow_ring_node_t
*flow_ring_node
;
8779 if (status
!= BCME_OK
) {
8780 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
8781 __FUNCTION__
, status
));
8785 /* Boundary check of the flowid */
8786 if (flowid
>= bus
->dhd
->num_flow_rings
) {
8787 DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__
,
8788 flowid
, bus
->dhd
->num_flow_rings
));
8792 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
8793 if (!flow_ring_node
) {
8794 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__
));
8798 ASSERT(flow_ring_node
->flowid
== flowid
);
8799 if (flow_ring_node
->flowid
!= flowid
) {
8800 DHD_ERROR(("%s: flowid %d is different from the flowid "
8801 "of the flow_ring_node %d\n", __FUNCTION__
, flowid
,
8802 flow_ring_node
->flowid
));
/* Flush complete: the ring may carry traffic again. */
8806 flow_ring_node
->status
= FLOW_RING_STATUS_OPEN
;
/* Accessor: number of H2D submission rings the bus supports.
 * NOTE(review): the return type line is missing from this damaged view.
 */
8811 dhd_bus_max_h2d_queues(struct dhd_bus
*bus
)
8813 return bus
->max_submission_rings
;
8816 /* To be symmetric with SDIO */
/* NOTE(review): the return type line and the entire function body are missing
 * from this damaged view; presumably a no-op stub kept for SDIO API symmetry
 * -- confirm against the full source.
 */
8818 dhd_bus_pktq_flush(dhd_pub_t
*dhdp
)
/* Setter for the bus is_linkdown flag (PCIe link state bookkeeping).
 * NOTE(review): the return type line is missing from this damaged view.
 */
8824 dhd_bus_set_linkdown(dhd_pub_t
*dhdp
, bool val
)
8826 dhdp
->bus
->is_linkdown
= val
;
/* Getter for the bus is_linkdown flag.
 * NOTE(review): the return type line is missing from this damaged view.
 */
8830 dhd_bus_get_linkdown(dhd_pub_t
*dhdp
)
8832 return dhdp
->bus
->is_linkdown
;
8835 #ifdef IDLE_TX_FLOW_MGMT
8836 /* resume request */
/* Ask the dongle to resume a previously suspended flow ring; the ring is
 * marked RESUME_PENDING until the response arrives. NOTE(review): the return
 * type line and the return statement are missing from this damaged view.
 */
8838 dhd_bus_flow_ring_resume_request(dhd_bus_t
*bus
, void *arg
)
8840 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)arg
;
8842 DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__
, flow_ring_node
->flowid
));
8844 flow_ring_node
->status
= FLOW_RING_STATUS_RESUME_PENDING
;
8846 /* Send Msg to device about flow ring resume */
8847 dhd_prot_flow_ring_resume(bus
->dhd
, flow_ring_node
);
8852 /* add the node back to active flowring */
/* Handle the dongle's response to a flow ring resume: reopen the ring and
 * schedule any packets that queued up while it was suspended. NOTE(review):
 * the return type line, the error-branch return and some closing braces are
 * missing from this damaged view.
 */
8854 dhd_bus_flow_ring_resume_response(dhd_bus_t
*bus
, uint16 flowid
, int32 status
)
8857 flow_ring_node_t
*flow_ring_node
;
8859 DHD_TRACE(("%s :flowid %d \n", __FUNCTION__
, flowid
));
8861 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
8862 ASSERT(flow_ring_node
->flowid
== flowid
);
8864 if (status
!= BCME_OK
) {
8865 DHD_ERROR(("%s Error Status = %d \n",
8866 __FUNCTION__
, status
));
8870 DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
8871 __FUNCTION__
, flow_ring_node
->flowid
, flow_ring_node
->queue
.len
));
8873 flow_ring_node
->status
= FLOW_RING_STATUS_OPEN
;
/* Drain whatever accumulated in the queue while the ring was suspended. */
8875 dhd_bus_schedule_queue(bus
, flowid
, FALSE
);
8879 /* scan the flow rings in active list for idle time out */
/* Rate-limit the idle scan: only run dhd_bus_idle_scan() when more than
 * IDLE_FLOW_LIST_TIMEOUT ms have elapsed since the previous scan.
 * NOTE(review): the return type line and the declaration of the local `diff`
 * are missing from this damaged view.
 */
8881 dhd_bus_check_idle_scan(dhd_bus_t
*bus
)
8883 uint64 time_stamp
; /* in millisec */
8886 time_stamp
= OSL_SYSUPTIME();
8887 diff
= time_stamp
- bus
->active_list_last_process_ts
;
8889 if (diff
> IDLE_FLOW_LIST_TIMEOUT
) {
8890 dhd_bus_idle_scan(bus
);
8891 bus
->active_list_last_process_ts
= OSL_SYSUPTIME();
8897 /* scan the nodes in active list till it finds a non idle node */
/* Walk the active flow ring list from the tail (least recently used end),
 * removing zombie rings and batching suspend requests for rings that have
 * been idle with an empty queue for longer than IDLE_FLOW_RING_TIMEOUT.
 * NOTE(review): the return type line and the declarations of the locals
 * item, prev and count (all used below) are missing from this damaged view,
 * as are the `continue`/`break` statements implied by the surviving comments.
 */
8899 dhd_bus_idle_scan(dhd_bus_t
*bus
)
8902 flow_ring_node_t
*flow_ring_node
;
8903 uint64 time_stamp
, diff
;
8904 unsigned long flags
;
8905 uint16 ringid
[MAX_SUSPEND_REQ
];
8908 time_stamp
= OSL_SYSUPTIME();
8909 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
/* Tail-first walk; prev is captured before any node is unlinked. */
8911 for (item
= dll_tail_p(&bus
->flowring_active_list
);
8912 !dll_end(&bus
->flowring_active_list
, item
); item
= prev
) {
8913 prev
= dll_prev_p(item
);
8915 flow_ring_node
= dhd_constlist_to_flowring(item
);
/* NOTE(review): the last submission ring is skipped here -- presumably the
 * debug-info ring; confirm against the full source.
 */
8917 if (flow_ring_node
->flowid
== (bus
->max_submission_rings
- 1))
8920 if (flow_ring_node
->status
!= FLOW_RING_STATUS_OPEN
) {
8921 /* Takes care of deleting zombie rings */
8922 /* delete from the active list */
8923 DHD_INFO(("deleting flow id %u from active list\n",
8924 flow_ring_node
->flowid
));
8925 __dhd_flow_ring_delete_from_active_list(bus
, flow_ring_node
);
8929 diff
= time_stamp
- flow_ring_node
->last_active_ts
;
8931 if ((diff
> IDLE_FLOW_RING_TIMEOUT
) && !(flow_ring_node
->queue
.len
)) {
8932 DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node
->flowid
));
8933 /* delete from the active list */
8934 __dhd_flow_ring_delete_from_active_list(bus
, flow_ring_node
);
8935 flow_ring_node
->status
= FLOW_RING_STATUS_SUSPENDED
;
8936 ringid
[count
] = flow_ring_node
->flowid
;
8938 if (count
== MAX_SUSPEND_REQ
) {
8939 /* create a batch message now!! */
8940 dhd_prot_flow_ring_batch_suspend_request(bus
->dhd
, ringid
, count
);
8946 /* No more scanning, break from here! */
/* Flush any partially filled batch collected during the walk. */
8952 dhd_prot_flow_ring_batch_suspend_request(bus
->dhd
, ringid
, count
);
8955 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8960 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
8962 unsigned long flags
;
8965 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
8966 /* check if the node is already at head, otherwise delete it and prepend */
8967 list
= dll_head_p(&bus
->flowring_active_list
);
8968 if (&flow_ring_node
->list
!= list
) {
8969 dll_delete(&flow_ring_node
->list
);
8970 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
8973 /* update flow ring timestamp */
8974 flow_ring_node
->last_active_ts
= OSL_SYSUPTIME();
8976 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8981 void dhd_flow_ring_add_to_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
8983 unsigned long flags
;
8985 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
8987 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
8988 /* update flow ring timestamp */
8989 flow_ring_node
->last_active_ts
= OSL_SYSUPTIME();
8991 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8995 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
8997 dll_delete(&flow_ring_node
->list
);
9000 void dhd_flow_ring_delete_from_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
9002 unsigned long flags
;
9004 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
9006 __dhd_flow_ring_delete_from_active_list(bus
, flow_ring_node
);
9008 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
9012 #endif /* IDLE_TX_FLOW_MGMT */
/*
 * Thin pass-through wrappers exposing the dhdpcie_* host-side primitives
 * (clock control, device enable/disable, resource management, IRQ and
 * dongle attach) through the generic dhd bus API. Each forwards directly to
 * the matching implementation.
 * NOTE(review): return types reconstructed from the wrapped calls in a
 * mangled extraction — confirm against the prototypes in dhd_pcie.h.
 */
int
dhdpcie_bus_clock_start(struct dhd_bus *bus)
{
	return dhdpcie_start_host_pcieclock(bus);
}

int
dhdpcie_bus_clock_stop(struct dhd_bus *bus)
{
	return dhdpcie_stop_host_pcieclock(bus);
}

int
dhdpcie_bus_disable_device(struct dhd_bus *bus)
{
	return dhdpcie_disable_device(bus);
}

int
dhdpcie_bus_enable_device(struct dhd_bus *bus)
{
	return dhdpcie_enable_device(bus);
}

int
dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
{
	return dhdpcie_alloc_resource(bus);
}

void
dhdpcie_bus_free_resource(struct dhd_bus *bus)
{
	dhdpcie_free_resource(bus);
}

int
dhd_bus_request_irq(struct dhd_bus *bus)
{
	return dhdpcie_bus_request_irq(bus);
}

int
dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
{
	return dhdpcie_dongle_attach(bus);
}
9063 dhd_bus_release_dongle(struct dhd_bus
*bus
)
9065 bool dongle_isolation
;
9068 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
9075 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
9077 #endif /* DEBUGGER || DHD_DSCOPE */
9079 dongle_isolation
= bus
->dhd
->dongle_isolation
;
9080 dhdpcie_bus_release_dongle(bus
, osh
, dongle_isolation
, TRUE
);
9088 dhdpcie_cto_init(struct dhd_bus
*bus
, bool enable
)
9093 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4,
9094 PCI_CTO_INT_MASK
| PCI_SBIM_MASK_SERR
);
9095 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9096 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
| SPROM_BACKPLANE_EN
);
9097 if (bus
->cto_threshold
== 0) {
9098 bus
->cto_threshold
= PCIE_CTO_TO_THRESH_DEFAULT
;
9101 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9102 OFFSETOF(sbpcieregs_t
, ctoctrl
), ~0,
9103 ((bus
->cto_threshold
<< PCIE_CTO_TO_THRESHOLD_SHIFT
) &
9104 PCIE_CTO_TO_THRESHHOLD_MASK
) |
9105 ((PCIE_CTO_CLKCHKCNT_VAL
<< PCIE_CTO_CLKCHKCNT_SHIFT
) &
9106 PCIE_CTO_CLKCHKCNT_MASK
) |
9107 PCIE_CTO_ENAB_MASK
);
9109 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4, 0);
9110 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9111 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
& ~SPROM_BACKPLANE_EN
);
9113 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9114 OFFSETOF(sbpcieregs_t
, ctoctrl
), ~0, 0);
9119 dhdpcie_cto_error_recovery(struct dhd_bus
*bus
)
9121 uint32 pci_intmask
, err_status
, dar_val
;
9125 pci_intmask
= dhdpcie_bus_cfg_read_dword(bus
, PCI_INT_MASK
, 4);
9126 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4, pci_intmask
& ~PCI_CTO_INT_MASK
);
9128 DHD_OS_WAKE_LOCK(bus
->dhd
);
9130 DHD_ERROR(("--- CTO Triggered --- %d\n", bus
->pwr_req_ref
));
9133 * DAR still accessible
9135 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9136 DAR_CLK_CTRL(bus
->sih
->buscorerev
), 0, 0);
9137 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_CLK_CTRL(bus
->sih
->buscorerev
), dar_val
));
9139 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9140 DAR_PCIE_PWR_CTRL(bus
->sih
->buscorerev
), 0, 0);
9141 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_PCIE_PWR_CTRL(bus
->sih
->buscorerev
), dar_val
));
9143 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9144 DAR_INTSTAT(bus
->sih
->buscorerev
), 0, 0);
9145 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_INTSTAT(bus
->sih
->buscorerev
), dar_val
));
9147 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9148 DAR_ERRLOG(bus
->sih
->buscorerev
), 0, 0);
9149 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_ERRLOG(bus
->sih
->buscorerev
), dar_val
));
9151 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9152 DAR_ERRADDR(bus
->sih
->buscorerev
), 0, 0);
9153 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_ERRADDR(bus
->sih
->buscorerev
), dar_val
));
9155 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9156 DAR_PCIMailBoxInt(bus
->sih
->buscorerev
), 0, 0);
9157 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_PCIMailBoxInt(bus
->sih
->buscorerev
), dar_val
));
9159 /* reset backplane */
9160 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9161 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
| SPROM_CFG_TO_SB_RST
);
9163 /* clear timeout error */
9165 err_status
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9166 DAR_ERRLOG(bus
->sih
->buscorerev
),
9168 if (err_status
& PCIE_CTO_ERR_MASK
) {
9169 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9170 DAR_ERRLOG(bus
->sih
->buscorerev
),
9171 ~0, PCIE_CTO_ERR_MASK
);
9175 OSL_DELAY(CTO_TO_CLEAR_WAIT_MS
* 1000);
9177 if (i
> CTO_TO_CLEAR_WAIT_MAX_CNT
) {
9178 DHD_ERROR(("cto recovery fail\n"));
9180 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
9185 /* clear interrupt status */
9186 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_STATUS
, 4, PCI_CTO_INT_MASK
);
9188 /* Halt ARM & remove reset */
9189 /* TBD : we can add ARM Halt here in case */
9191 /* reset SPROM_CFG_TO_SB_RST */
9192 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9194 DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9195 PCI_SPROM_CONTROL
, SPROM_CFG_TO_SB_RST
, val
));
9196 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
& ~SPROM_CFG_TO_SB_RST
);
9198 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9199 DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9200 PCI_SPROM_CONTROL
, SPROM_CFG_TO_SB_RST
, val
));
9202 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
9206 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus
*bus
)
9210 val
= dhdpcie_bus_cfg_read_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4);
9211 dhdpcie_bus_cfg_write_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4,
9212 val
| (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT
));
9215 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
9217 dhdpcie_init_d11status(struct dhd_bus
*bus
)
9223 if (bus
->pcie_sh
->flags2
& PCIE_SHARED2_D2H_D11_TX_STATUS
) {
9224 flags2
= bus
->pcie_sh
->flags2
;
9225 addr
= bus
->shared_addr
+ OFFSETOF(pciedev_shared_t
, flags2
);
9226 flags2
|= PCIE_SHARED2_H2D_D11_TX_STATUS
;
9227 ret
= dhdpcie_bus_membytes(bus
, TRUE
, addr
,
9228 (uint8
*)&flags2
, sizeof(flags2
));
9230 DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
9234 bus
->pcie_sh
->flags2
= flags2
;
9235 bus
->dhd
->d11_tx_status
= TRUE
;
/*
 * Stub variant used when neither DBG_PKT_MON nor DHD_PKT_LOGGING is built:
 * D11 TX status negotiation is a no-op.
 * NOTE(review): body lost in the mangled extraction; reconstructed as a
 * trivial success return — confirm against upstream.
 */
int
dhdpcie_init_d11status(struct dhd_bus *bus)
{
	return 0;
}
9246 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
9248 #ifdef BCMPCIE_OOB_HOST_WAKE
9250 dhd_bus_oob_intr_register(dhd_pub_t
*dhdp
)
9252 return dhdpcie_oob_intr_register(dhdp
->bus
);
9256 dhd_bus_oob_intr_unregister(dhd_pub_t
*dhdp
)
9258 dhdpcie_oob_intr_unregister(dhdp
->bus
);
9262 dhd_bus_oob_intr_set(dhd_pub_t
*dhdp
, bool enable
)
9264 dhdpcie_oob_intr_set(dhdp
->bus
, enable
);
9266 #endif /* BCMPCIE_OOB_HOST_WAKE */
9269 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t
*bus
)
9271 return bus
->dhd
->d2h_hostrdy_supported
;
9275 dhd_pcie_dump_core_regs(dhd_pub_t
* pub
, uint32 index
, uint32 first_addr
, uint32 last_addr
)
9277 dhd_bus_t
*bus
= pub
->bus
;
9278 uint32 coreoffset
= index
<< 12;
9279 uint32 core_addr
= SI_ENUM_BASE(bus
->sih
) + coreoffset
;
9282 while (first_addr
<= last_addr
) {
9283 core_addr
= SI_ENUM_BASE(bus
->sih
) + coreoffset
+ first_addr
;
9284 if (si_backplane_access(bus
->sih
, core_addr
, 4, &value
, TRUE
) != BCME_OK
) {
9285 DHD_ERROR(("Invalid size/addr combination \n"));
9287 DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr
, value
));
9288 first_addr
= first_addr
+ 4;
9293 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t
*bus
)
9297 else if (bus
->idma_enabled
) {
9298 return bus
->dhd
->idma_enable
;
9305 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t
*bus
)
9309 else if (bus
->ifrm_enabled
) {
9310 return bus
->dhd
->ifrm_enable
;
9317 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t
*bus
)
9321 } else if (bus
->dar_enabled
) {
9322 return bus
->dhd
->dar_enable
;
9329 dhdpcie_bus_enab_pcie_dw(dhd_bus_t
*bus
, uint8 dw_option
)
9331 DHD_ERROR(("ENABLING DW:%d\n", dw_option
));
9332 bus
->dw_option
= dw_option
;
9336 dhd_bus_dump_trap_info(dhd_bus_t
*bus
, struct bcmstrbuf
*strbuf
)
9338 trap_t
*tr
= &bus
->dhd
->last_trap_info
;
9340 "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
9341 " lp 0x%x, rpc 0x%x"
9342 "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
9343 "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
9344 "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
9345 ltoh32(tr
->type
), ltoh32(tr
->epc
), ltoh32(tr
->cpsr
), ltoh32(tr
->spsr
),
9346 ltoh32(tr
->r13
), ltoh32(tr
->r14
), ltoh32(tr
->pc
),
9347 ltoh32(bus
->pcie_sh
->trap_addr
),
9348 ltoh32(tr
->r0
), ltoh32(tr
->r1
), ltoh32(tr
->r2
), ltoh32(tr
->r3
),
9349 ltoh32(tr
->r4
), ltoh32(tr
->r5
), ltoh32(tr
->r6
), ltoh32(tr
->r7
),
9350 ltoh32(tr
->r8
), ltoh32(tr
->r9
), ltoh32(tr
->r10
),
9351 ltoh32(tr
->r11
), ltoh32(tr
->r12
));
9355 dhd_bus_readwrite_bp_addr(dhd_pub_t
*dhdp
, uint addr
, uint size
, uint
* data
, bool read
)
9358 struct dhd_bus
*bus
= dhdp
->bus
;
9360 if (si_backplane_access(bus
->sih
, addr
, size
, data
, read
) != BCME_OK
) {
9361 DHD_ERROR(("Invalid size/addr combination \n"));
9362 bcmerror
= BCME_ERROR
;
9369 dhd_get_idletime(dhd_pub_t
*dhd
)
9371 return dhd
->bus
->idletime
;
9374 #ifdef DHD_SSSR_DUMP
9377 dhd_sbreg_op(dhd_pub_t
*dhd
, uint addr
, uint
*val
, bool read
)
9380 si_backplane_access(dhd
->bus
->sih
, addr
, sizeof(uint
), val
, read
);
9381 DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__
, addr
, *val
, read
));
9386 dhdpcie_get_sssr_fifo_dump(dhd_pub_t
*dhd
, uint
*buf
, uint fifo_size
,
9387 uint addr_reg
, uint data_reg
)
9393 DHD_ERROR(("%s\n", __FUNCTION__
));
9396 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__
));
9401 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__
));
9405 /* Set the base address offset to 0 */
9408 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9411 /* Read 4 bytes at once and loop for fifo_size / 4 */
9412 for (i
= 0; i
< fifo_size
/ 4; i
++) {
9413 si_backplane_access(dhd
->bus
->sih
, addr
, sizeof(uint
), &val
, TRUE
);
9421 dhdpcie_get_sssr_dig_dump(dhd_pub_t
*dhd
, uint
*buf
, uint fifo_size
,
9427 si_t
*sih
= dhd
->bus
->sih
;
9429 DHD_ERROR(("%s\n", __FUNCTION__
));
9432 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__
));
9437 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__
));
9443 if ((!dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) &&
9444 dhd
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) {
9445 dhdpcie_bus_membytes(dhd
->bus
, FALSE
, addr_reg
, (uint8
*)buf
, fifo_size
);
9447 /* Check if vasip clk is disabled, if yes enable it */
9448 addr
= dhd
->sssr_reg_info
.vasip_regs
.wrapper_regs
.ioctrl
;
9449 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9452 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9456 /* Read 4 bytes at once and loop for fifo_size / 4 */
9457 for (i
= 0; i
< fifo_size
/ 4; i
++, addr
+= 4) {
9458 si_backplane_access(sih
, addr
, sizeof(uint
), &val
, TRUE
);
9466 chipcregs_t
*chipcregs
;
9468 /* Save the current core */
9469 cur_coreid
= si_coreid(sih
);
9471 /* Switch to ChipC */
9472 chipcregs
= (chipcregs_t
*)si_setcore(sih
, CC_CORE_ID
, 0);
9474 chipc_corerev
= si_corerev(sih
);
9476 if (chipc_corerev
== 64) {
9477 W_REG(si_osh(sih
), &chipcregs
->sr_memrw_addr
, 0);
9479 /* Read 4 bytes at once and loop for fifo_size / 4 */
9480 for (i
= 0; i
< fifo_size
/ 4; i
++) {
9481 buf
[i
] = R_REG(si_osh(sih
), &chipcregs
->sr_memrw_data
);
9486 /* Switch back to the original core */
9487 si_setcore(sih
, cur_coreid
, 0);
9493 #if defined(BCMPCIE) && defined(DHD_LOG_DUMP)
9495 dhdpcie_get_etd_preserve_logs(dhd_pub_t
*dhd
,
9496 uint8
*ext_trap_data
, void *event_decode_data
)
9498 hnd_ext_trap_hdr_t
*hdr
= NULL
;
9500 eventlog_trapdata_info_t
*etd_evtlog
= NULL
;
9501 eventlog_trap_buf_info_t
*evtlog_buf_arr
= NULL
;
9507 if (!ext_trap_data
|| !event_decode_data
|| !dhd
)
9510 if (!dhd
->concise_dbg_buf
)
9513 /* First word is original trap_data, skip */
9514 ext_trap_data
+= sizeof(uint32
);
9516 hdr
= (hnd_ext_trap_hdr_t
*)ext_trap_data
;
9517 tlv
= bcm_parse_tlvs(hdr
->data
, hdr
->len
, TAG_TRAP_LOG_DATA
);
9519 uint32 baseaddr
= 0;
9520 uint32 endaddr
= dhd
->bus
->dongle_ram_base
+ dhd
->bus
->ramsize
- 4;
9522 etd_evtlog
= (eventlog_trapdata_info_t
*)tlv
->data
;
9523 DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
9524 "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__
,
9525 (etd_evtlog
->num_elements
),
9526 ntoh32(etd_evtlog
->seq_num
), (etd_evtlog
->log_arr_addr
)));
9527 arr_size
= (uint32
)sizeof(*evtlog_buf_arr
) * (etd_evtlog
->num_elements
);
9529 DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__
));
9532 evtlog_buf_arr
= MALLOCZ(dhd
->osh
, arr_size
);
9533 if (!evtlog_buf_arr
) {
9534 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__
));
9538 /* boundary check */
9539 baseaddr
= etd_evtlog
->log_arr_addr
;
9540 if ((baseaddr
< dhd
->bus
->dongle_ram_base
) ||
9541 ((baseaddr
+ arr_size
) > endaddr
)) {
9542 DHD_ERROR(("%s: Error reading invalid address\n",
9547 /* read the eventlog_trap_buf_info_t array from dongle memory */
9548 err
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
,
9549 (ulong
)(etd_evtlog
->log_arr_addr
),
9550 (uint8
*)evtlog_buf_arr
, arr_size
);
9551 if (err
!= BCME_OK
) {
9552 DHD_ERROR(("%s: Error reading event log array from dongle !\n",
9556 /* ntoh is required only for seq_num, because in the original
9557 * case of event logs from info ring, it is sent from dongle in that way
9558 * so for ETD also dongle follows same convention
9560 seqnum
= ntoh32(etd_evtlog
->seq_num
);
9561 memset(dhd
->concise_dbg_buf
, 0, CONCISE_DUMP_BUFLEN
);
9562 for (i
= 0; i
< (etd_evtlog
->num_elements
); ++i
) {
9563 /* boundary check */
9564 baseaddr
= evtlog_buf_arr
[i
].buf_addr
;
9565 if ((baseaddr
< dhd
->bus
->dongle_ram_base
) ||
9566 ((baseaddr
+ evtlog_buf_arr
[i
].len
) > endaddr
)) {
9567 DHD_ERROR(("%s: Error reading invalid address\n",
9571 /* read each individual event log buf from dongle memory */
9572 err
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
,
9573 ((ulong
)evtlog_buf_arr
[i
].buf_addr
),
9574 dhd
->concise_dbg_buf
, (evtlog_buf_arr
[i
].len
));
9575 if (err
!= BCME_OK
) {
9576 DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
9580 dhd_dbg_msgtrace_log_parser(dhd
, dhd
->concise_dbg_buf
,
9581 event_decode_data
, (evtlog_buf_arr
[i
].len
),
9582 FALSE
, hton32(seqnum
));
9586 MFREE(dhd
->osh
, evtlog_buf_arr
, arr_size
);
9588 DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__
));
9591 #endif /* BCMPCIE && DHD_LOG_DUMP */
9594 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t
*dhd
)
9599 DHD_ERROR(("%s\n", __FUNCTION__
));
9601 /* conditionally clear bits [11:8] of PowerCtrl */
9602 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9603 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9604 if (!(val
& dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl_mask
)) {
9605 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9606 val
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl_mask
;
9607 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9613 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t
*dhd
)
9618 DHD_ERROR(("%s\n", __FUNCTION__
));
9620 /* conditionally clear bits [11:8] of PowerCtrl */
9621 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9622 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9623 if (val
& dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl_mask
) {
9624 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9626 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9632 dhdpcie_clear_intmask_and_timer(dhd_pub_t
*dhd
)
9637 DHD_ERROR(("%s\n", __FUNCTION__
));
9639 /* clear chipcommon intmask */
9640 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.intmask
;
9642 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9644 /* clear PMUIntMask0 */
9645 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.pmuintmask0
;
9647 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9649 /* clear PMUIntMask1 */
9650 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.pmuintmask1
;
9652 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9654 /* clear res_req_timer */
9655 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.resreqtimer
;
9657 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9659 /* clear macresreqtimer */
9660 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.macresreqtimer
;
9662 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9664 /* clear macresreqtimer1 */
9665 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.macresreqtimer1
;
9667 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9669 /* clear VasipClkEn */
9670 if (dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
9671 addr
= dhd
->sssr_reg_info
.vasip_regs
.wrapper_regs
.ioctrl
;
9673 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9680 dhdpcie_d11_check_outofreset(dhd_pub_t
*dhd
)
9686 DHD_ERROR(("%s\n", __FUNCTION__
));
9688 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9689 /* Check if bit 0 of resetctrl is cleared */
9690 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.resetctrl
;
9692 DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
9694 /* ignore invalid address */
9695 dhd
->sssr_d11_outofreset
[i
] = FALSE
;
9698 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9700 dhd
->sssr_d11_outofreset
[i
] = TRUE
;
9702 dhd
->sssr_d11_outofreset
[i
] = FALSE
;
9704 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
9705 __FUNCTION__
, i
, dhd
->sssr_d11_outofreset
[i
]));
9711 dhdpcie_d11_clear_clk_req(dhd_pub_t
*dhd
)
9717 DHD_ERROR(("%s\n", __FUNCTION__
));
9719 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9720 if (dhd
->sssr_d11_outofreset
[i
]) {
9721 /* clear request clk only if itopoobb is non zero */
9722 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.itopoobb
;
9723 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9725 /* clear clockcontrolstatus */
9726 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.clockcontrolstatus
;
9728 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.clockcontrolstatus_val
;
9729 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9737 dhdpcie_arm_clear_clk_req(dhd_pub_t
*dhd
)
9742 DHD_ERROR(("%s\n", __FUNCTION__
));
9744 /* Check if bit 0 of resetctrl is cleared */
9745 addr
= dhd
->sssr_reg_info
.arm_regs
.wrapper_regs
.resetctrl
;
9746 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9748 /* clear request clk only if itopoobb is non zero */
9749 addr
= dhd
->sssr_reg_info
.arm_regs
.wrapper_regs
.itopoobb
;
9750 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9752 /* clear clockcontrolstatus */
9753 addr
= dhd
->sssr_reg_info
.arm_regs
.base_regs
.clockcontrolstatus
;
9754 val
= dhd
->sssr_reg_info
.arm_regs
.base_regs
.clockcontrolstatus_val
;
9755 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9762 dhdpcie_pcie_clear_clk_req(dhd_pub_t
*dhd
)
9767 DHD_ERROR(("%s\n", __FUNCTION__
));
9769 /* clear request clk only if itopoobb is non zero */
9770 addr
= dhd
->sssr_reg_info
.pcie_regs
.wrapper_regs
.itopoobb
;
9771 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9773 /* clear clockcontrolstatus */
9774 addr
= dhd
->sssr_reg_info
.pcie_regs
.base_regs
.clockcontrolstatus
;
9775 val
= dhd
->sssr_reg_info
.pcie_regs
.base_regs
.clockcontrolstatus_val
;
9776 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9782 dhdpcie_pcie_send_ltrsleep(dhd_pub_t
*dhd
)
9787 DHD_ERROR(("%s\n", __FUNCTION__
));
9789 addr
= dhd
->sssr_reg_info
.pcie_regs
.base_regs
.ltrstate
;
9791 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9794 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9800 dhdpcie_clear_clk_req(dhd_pub_t
*dhd
)
9802 DHD_ERROR(("%s\n", __FUNCTION__
));
9804 dhdpcie_arm_clear_clk_req(dhd
);
9806 dhdpcie_d11_clear_clk_req(dhd
);
9808 dhdpcie_pcie_clear_clk_req(dhd
);
9814 dhdpcie_bring_d11_outofreset(dhd_pub_t
*dhd
)
9820 DHD_ERROR(("%s\n", __FUNCTION__
));
9822 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9823 if (dhd
->sssr_d11_outofreset
[i
]) {
9824 /* disable core by setting bit 0 */
9825 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.resetctrl
;
9827 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9830 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.ioctrl
;
9831 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[0];
9832 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9834 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[1];
9835 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9837 /* enable core by clearing bit 0 */
9838 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.resetctrl
;
9840 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9842 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.ioctrl
;
9843 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[2];
9844 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9846 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[3];
9847 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9849 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[4];
9850 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9857 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t
*dhd
)
9861 DHD_ERROR(("%s\n", __FUNCTION__
));
9863 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9864 if (dhd
->sssr_d11_outofreset
[i
]) {
9865 dhdpcie_get_sssr_fifo_dump(dhd
, dhd
->sssr_d11_before
[i
],
9866 dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
,
9867 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtaddress
,
9868 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtdata
);
9872 if (dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
9873 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_before
,
9874 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
,
9875 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_addr
);
9876 } else if ((dhd
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) &&
9877 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
) {
9878 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_before
,
9879 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_size
,
9880 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
);
9887 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t
*dhd
)
9891 DHD_ERROR(("%s\n", __FUNCTION__
));
9893 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9894 if (dhd
->sssr_d11_outofreset
[i
]) {
9895 dhdpcie_get_sssr_fifo_dump(dhd
, dhd
->sssr_d11_after
[i
],
9896 dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
,
9897 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtaddress
,
9898 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtdata
);
9902 if (dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
9903 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_after
,
9904 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
,
9905 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_addr
);
9906 } else if ((dhd
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) &&
9907 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
) {
9908 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_after
,
9909 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_size
,
9910 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
);
/*
 * Top-level SSSR (Silicon Save/Simulated Restore) dump orchestration:
 * capture dongle state before forcing a save/restore cycle, run the
 * suspend sequence (clear masks/timers, power control, clock requests,
 * LTR sleep), wait, resume, capture state again, and schedule the dump
 * for writing to the host filesystem.
 * NOTE(review): this block is preserved byte-for-byte from a whitespace-
 * mangled extraction; several original lines (braces, error returns and
 * the pre-restore delay statement) were lost in extraction.
 */
9917 dhdpcie_sssr_dump(dhd_pub_t
*dhd
)
/* bail out unless SSSR buffers/reg-info were initialised */
9919 if (!dhd
->sssr_inited
) {
9920 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__
));
/* a downed PCIe link makes all backplane accesses unsafe */
9924 if (dhd
->bus
->is_linkdown
) {
9925 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
/* record which D11 cores are out of reset; later steps consult this */
9929 dhdpcie_d11_check_outofreset(dhd
);
9931 DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__
));
9932 if (dhdpcie_sssr_dump_get_before_sr(dhd
) != BCME_OK
) {
9933 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__
));
/* suspend sequence: quiesce wake sources, then push the link to sleep */
9937 dhdpcie_clear_intmask_and_timer(dhd
);
9938 dhdpcie_suspend_chipcommon_powerctrl(dhd
);
9939 dhdpcie_clear_clk_req(dhd
);
9940 dhdpcie_pcie_send_ltrsleep(dhd
);
9942 /* Wait for some time before Restore */
/* NOTE(review): the delay statement itself was lost in extraction */
9945 dhdpcie_resume_chipcommon_powerctrl(dhd
);
9946 dhdpcie_bring_d11_outofreset(dhd
);
9948 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__
));
9949 if (dhdpcie_sssr_dump_get_after_sr(dhd
) != BCME_OK
) {
9950 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__
));
/* hand the collected buffers to the scheduler for file write-out */
9954 dhd_schedule_sssr_dump(dhd
);
9960 dhd_bus_sssr_dump(dhd_pub_t
*dhd
)
9962 return dhdpcie_sssr_dump(dhd
);
9964 #endif /* DHD_SSSR_DUMP */
9966 #ifdef DHD_WAKE_STATUS
9968 dhd_bus_get_wakecount(dhd_pub_t
*dhd
)
9970 return &dhd
->bus
->wake_counts
;
9973 dhd_bus_get_bus_wake(dhd_pub_t
*dhd
)
9975 return bcmpcie_set_get_wake(dhd
->bus
, 0);
9977 #endif /* DHD_WAKE_STATUS */
/*
 * Register addresses and CIS-tuple constants used by dhdpcie_get_nvpath_otp()
 * to read the Olympic vendor tuple out of the OTP user area. All absolute
 * addresses are offsets from the default silicon enumeration base
 * (SI_ENUM_BASE_DEFAULT); tuple IDs follow the PCMCIA/CIS TLV convention.
 */
9979 #define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP)
9980 #define OTP_USER_AREA_OFFSET 0x80
9981 #define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET)
9982 #define OTP_VERSION_TUPLE_ID 0x15
9983 #define OTP_VENDOR_TUPLE_ID 0x80
/* 0XFF marks the end of the CIS region */
9984 #define OTP_CIS_REGION_END_TUPLE_ID 0XFF
9985 #define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE)
9986 #define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG)
9987 #define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4)
9988 #define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL)
9989 #define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C)
/* PMU resources that must be up for OTP reads */
9990 #define PMU_OTP_PWR_ON_MASK 0xC47
/*
 * Build the Olympic-convention NVRAM file name (P-<prog>_M-<mod>_V-<ven>__
 * m-<rev>.txt) from chip/module information stored in the OTP user area.
 * Only runs for 4355/4364 chips with OTP present: powers the OTP up
 * (caching OTPCtrl1, PMU min-res mask and SpromCtrl so they can be
 * restored), walks the CIS tuples to find the version tuple, parses the
 * "s=" stepping and the M=/m=/V= fields of the module-info string, then
 * restores the cached registers.
 * NOTE(review): this block is preserved byte-for-byte from a whitespace-
 * mangled extraction; many original lines (declarations, offset arithmetic,
 * the OTP clock-enable value) were lost in extraction.
 * NOTE(review): strncpy(progname, program, sizeof(progname)) does not
 * guarantee NUL-termination, and sprintf() into nv_path is unbounded —
 * candidates for snprintf() hardening once the full source is available.
 */
9993 dhdpcie_get_nvpath_otp(dhd_bus_t
*bus
, char* program
, char *nv_path
)
9999 char module_name
[5];
10000 char module_vendor
= 0;
10001 char module_rev
[4];
10002 uint8 tuple_id
= 0;
10003 uint8 tuple_len
= 0;
10004 uint32 cur_offset
= 0;
10005 uint32 version_tuple_offset
= 0;
10006 char module_info
[64];
10008 bool srom_present
= 0, otp_present
= 0;
10009 uint32 sprom_ctrl
= 0;
10010 uint32 otp_ctrl
= 0, minres_mask
= 0;
10011 int i
= 0, j
= 0, status
= BCME_ERROR
;
10013 if (!nv_path
|| !bus
) {
10017 /* read chip id first */
10018 if (si_backplane_access(bus
->sih
, SI_ENUM_BASE_DEFAULT
, 4, &val
, TRUE
) != BCME_OK
) {
10019 DHD_ERROR(("%s: bkplane access error ! \n", __FUNCTION__
));
10022 chip_id
= val
& 0xffff;
10025 /* read SpromCtrl register */
10026 si_backplane_access(bus
->sih
, SPROM_CTRL_REG_ADDR
, 4, &sprom_ctrl
, TRUE
);
10029 /* proceed only if OTP is present - i.e, the 5th bit OtpPresent is set
10030 * and chip is 4355 or 4364
10032 if ((val
& 0x20) && (chip_id
== 0x4355 || chip_id
== 0x4364)) {
10035 /* Check if the 4th bit (sprom_present) in CC Status REG is set */
10036 si_backplane_access(bus
->sih
, CHIP_COMMON_STATUS_REG_ADDR
, 4, &val
, TRUE
);
10041 /* OTP power up sequence */
10042 /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */
10043 si_backplane_access(bus
->sih
, OTP_CTRL1_REG_ADDR
, 4, &otp_ctrl
, TRUE
);
10045 si_backplane_access(bus
->sih
, OTP_CTRL1_REG_ADDR
, 4, &val
, FALSE
);
10047 /* 2. enable OTP power through min res mask register in PMU */
10048 si_backplane_access(bus
->sih
, PMU_MINRESMASK_REG_ADDR
, 4, &minres_mask
, TRUE
);
10049 val
= minres_mask
| PMU_OTP_PWR_ON_MASK
;
10050 si_backplane_access(bus
->sih
, PMU_MINRESMASK_REG_ADDR
, 4, &val
, FALSE
);
10052 /* 3. if srom is present, need to set OtpSelect 4th bit
10053 * in SpromCtrl register to read otp
10055 if (srom_present
) {
10057 val
= sprom_ctrl
| 0x10;
10058 si_backplane_access(bus
->sih
, SPROM_CTRL_REG_ADDR
, 4, &val
, FALSE
);
10061 /* Wait for PMU to power up. */
10063 si_backplane_access(bus
->sih
, PMU_RES_STATE_REG_ADDR
, 4, &val
, TRUE
);
10064 DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__
, val
));
10066 si_backplane_access(bus
->sih
, SI_ENUM_BASE_DEFAULT
, 4, &val
, TRUE
);
10067 DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__
, val
));
10069 si_backplane_access(bus
->sih
, OTP_ADDRESS
, 2, &val
, TRUE
);
10070 DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__
, val
));
10072 cur_offset
= OTP_USER_AREA_ADDR
+ 0x40;
10073 /* read required data from otp to construct FW string name
10074 * data like - chip info, module info. This is present in the
10075 * form of a Vendor CIS Tuple whose format is provided by Olympic.
10076 * The data is in the form of ASCII character strings.
10077 * The Vendor tuple along with other CIS tuples are present
10078 * in the OTP user area. A CIS tuple is a TLV format.
10079 * (T = 1-byte, L = 1-byte, V = n-bytes)
10082 /* Find the version tuple */
10083 while (tuple_id
!= OTP_CIS_REGION_END_TUPLE_ID
) {
10084 si_backplane_access(bus
->sih
, cur_offset
,
10085 2, (uint
*)otp_data
, TRUE
);
10087 tuple_id
= otp_data
[0];
10088 tuple_len
= otp_data
[1];
10089 if (tuple_id
== OTP_VERSION_TUPLE_ID
) {
10090 version_tuple_offset
= cur_offset
;
10093 /* if its NULL tuple, skip */
10097 cur_offset
+= tuple_len
+ 2;
10100 /* skip the major, minor ver. numbers, manufacturer and product names */
10101 cur_offset
= version_tuple_offset
+ 6;
10103 /* read the chip info */
10104 si_backplane_access(bus
->sih
, cur_offset
,
10105 2, (uint
*)otp_data
, TRUE
);
10106 if (otp_data
[0] == 's' && otp_data
[1] == '=') {
10107 /* read the stepping */
10110 si_backplane_access(bus
->sih
, cur_offset
,
10111 2, (uint
*)stepping
, TRUE
);
10112 /* read module info */
10113 memset(module_info
, 0, 64);
10115 si_backplane_access(bus
->sih
, cur_offset
,
10116 2, (uint
*)otp_data
, TRUE
);
10117 while (otp_data
[0] != OTP_CIS_REGION_END_TUPLE_ID
&&
10118 otp_data
[1] != OTP_CIS_REGION_END_TUPLE_ID
) {
10119 memcpy(&module_info
[i
], otp_data
, 2);
10122 si_backplane_access(bus
->sih
, cur_offset
,
10123 2, (uint
*)otp_data
, TRUE
);
10125 /* replace any null characters found at the beginning
10126 * and middle of the string
10128 for (j
= 0; j
< i
; ++j
) {
10129 if (module_info
[j
] == 0)
10130 module_info
[j
] = ' ';
10132 DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n",
10133 stepping
[0], stepping
[1], module_info
));
10134 /* extract the module name, revision and vendor
10135 * information from the module info string
10137 for (i
= 0; module_info
[i
]; i
++) {
10138 if (module_info
[i
] == 'M' && module_info
[i
+ 1] == '=') {
10139 memcpy(module_name
, &module_info
[i
+ 2], 4);
10140 module_name
[4] = 0;
10143 else if (module_info
[i
] == 'm' && module_info
[i
+ 1] == '=') {
10144 memcpy(module_rev
, &module_info
[i
+ 2], 3);
10148 else if (module_info
[i
] == 'V' && module_info
[i
+ 1] == '=') {
10149 module_vendor
= module_info
[i
+ 2];
10154 /* construct the complete file path to nvram as per
10155 * olympic conventions
10157 strncpy(progname
, program
, sizeof(progname
));
10158 sprintf(nv_path
, "P-%s_M-%s_V-%c__m-%s.txt", progname
, module_name
,
10159 module_vendor
, module_rev
);
10160 DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__
, nv_path
));
10164 /* restore back the registers to their previous values */
10165 if (srom_present
) {
10166 si_backplane_access(bus
->sih
, SPROM_CTRL_REG_ADDR
, 4, &sprom_ctrl
, FALSE
);
10170 si_backplane_access(bus
->sih
, PMU_MINRESMASK_REG_ADDR
, 4,
10171 &minres_mask
, FALSE
);
10172 si_backplane_access(bus
->sih
, OTP_CTRL1_REG_ADDR
, 4, &otp_ctrl
, FALSE
);
10179 /* Writes random number(s) to the TCM. FW upon initialization reads this register
10180 * to fetch the random number, and uses it to randomize heap address space layout.
/*
 * NOTE(review): this is an elided fragment — the return type, opening brace,
 * the declaration of 'ret', the implied "addr -= count" adjustment and the
 * error/return paths are not visible here; confirm against the full source.
 */
10183 dhdpcie_wrt_rnd(struct dhd_bus
*bus
)
/* Metadata header describing the entropy blob (signature + byte count). */
10185 bcm_rand_metadata_t rnd_data
;
/* Host-supplied entropy staged here before being copied into dongle TCM. */
10186 uint8 rand_buf
[BCM_ENTROPY_HOST_NBYTES
];
10187 uint32 count
= BCM_ENTROPY_HOST_NBYTES
;
/*
 * TCM destination just below the NVRAM area: top of dongle RAM minus the
 * NVRAM offset, stepped back over the compressed NVRAM image size (low 16
 * bits of nvram_csm scaled by the compression factor) and the metadata
 * header itself.
 */
10189 uint32 addr
= bus
->dongle_ram_base
+ (bus
->ramsize
- BCM_NVRAM_OFFSET_TCM
) -
10190 ((bus
->nvram_csm
& 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR
+ sizeof(rnd_data
));
10192 memset(rand_buf
, 0, BCM_ENTROPY_HOST_NBYTES
);
/* Metadata fields are stored little-endian for the dongle to parse. */
10193 rnd_data
.signature
= htol32(BCM_NVRAM_RNG_SIGNATURE
);
10194 rnd_data
.count
= htol32(count
);
10195 /* write the metadata about random number */
10196 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&rnd_data
, sizeof(rnd_data
));
10197 /* scale back by number of random number counts */
/* NOTE(review): the address decrement implied by the comment above is not
 * visible in this fragment. */
10200 /* Now get & write the random number(s) */
10201 ret
= dhd_get_random_bytes(rand_buf
, count
);
/* NOTE(review): the body handling a failed entropy fetch is elided here. */
10202 if (ret
!= BCME_OK
) {
/* Copy the entropy bytes themselves into TCM for FW consumption. */
10205 dhdpcie_bus_membytes(bus
, TRUE
, addr
, rand_buf
, count
);
10210 #ifdef D2H_MINIDUMP
/*
 * Accessor: reports the bus's cached d2h_minidump flag.
 * No side effects; only reads dhdp->bus->d2h_minidump.
 * (Return type / braces are elided from this fragment.)
 */
10212 dhd_bus_is_minidump_enabled(dhd_pub_t
*dhdp
)
10214 return dhdp
->bus
->d2h_minidump
;
10216 #endif /* D2H_MINIDUMP */
/*
 * Dumps interrupt enable/disable counters and the latest ISR/DPC/suspend/
 * resume timestamps (via DHD_ERROR) for post-mortem debugging. All values
 * are read from the bus structure; OSL_LOCALTIME_NS() supplies "now".
 * Diagnostic only — no state is modified besides reading the clock.
 */
10219 dhd_pcie_intr_count_dump(dhd_pub_t
*dhd
)
10221 struct dhd_bus
*bus
= dhd
->bus
;
10222 uint64 current_time
;
/* --- interrupt enable/disable counters --- */
10224 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
10225 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
10226 bus
->resume_intr_enable_count
, bus
->dpc_intr_enable_count
));
10227 DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
10228 bus
->isr_intr_disable_count
, bus
->suspend_intr_disable_count
));
10229 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Out-of-band host-wake interrupt statistics (compile-time optional). */
10230 DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
10231 bus
->oob_intr_count
, bus
->oob_intr_enable_count
,
10232 bus
->oob_intr_disable_count
));
10233 DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
"\n",
10234 dhdpcie_get_oob_irq_num(bus
),
10235 GET_SEC_USEC(bus
->last_oob_irq_time
)));
10236 #endif /* BCMPCIE_OOB_HOST_WAKE */
10237 DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
10238 bus
->dpc_return_busdown_count
, bus
->non_ours_irq_count
))
;
/* --- timestamps, printed relative to the current OSL time --- */
10240 current_time
= OSL_LOCALTIME_NS();
10241 DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT
"\n",
10242 GET_SEC_USEC(current_time
)));
10243 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
10244 " isr_exit_time="SEC_USEC_FMT
"\n",
10245 GET_SEC_USEC(bus
->isr_entry_time
),
10246 GET_SEC_USEC(bus
->isr_exit_time
)));
10247 DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
10248 " last_non_ours_irq_time="SEC_USEC_FMT
"\n",
10249 GET_SEC_USEC(bus
->dpc_sched_time
),
10250 GET_SEC_USEC(bus
->last_non_ours_irq_time
)));
10251 DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
10252 " last_process_ctrlbuf_time="SEC_USEC_FMT
"\n",
10253 GET_SEC_USEC(bus
->dpc_entry_time
),
10254 GET_SEC_USEC(bus
->last_process_ctrlbuf_time
)));
10255 DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
10256 " last_process_txcpl_time="SEC_USEC_FMT
"\n",
10257 GET_SEC_USEC(bus
->last_process_flowring_time
),
10258 GET_SEC_USEC(bus
->last_process_txcpl_time
)));
10259 DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
10260 " last_process_infocpl_time="SEC_USEC_FMT
"\n",
10261 GET_SEC_USEC(bus
->last_process_rxcpl_time
),
10262 GET_SEC_USEC(bus
->last_process_infocpl_time
)));
10263 DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
10264 " resched_dpc_time="SEC_USEC_FMT
"\n",
10265 GET_SEC_USEC(bus
->dpc_exit_time
),
10266 GET_SEC_USEC(bus
->resched_dpc_time
)));
10267 DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT
"\n",
10268 GET_SEC_USEC(bus
->last_d3_inform_time
)));
/* --- suspend/resume window timestamps --- */
10270 DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
10271 " last_suspend_end_time="SEC_USEC_FMT
"\n",
10272 GET_SEC_USEC(bus
->last_suspend_start_time
),
10273 GET_SEC_USEC(bus
->last_suspend_end_time
)));
10274 DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
10275 " last_resume_end_time="SEC_USEC_FMT
"\n",
10276 GET_SEC_USEC(bus
->last_resume_start_time
),
10277 GET_SEC_USEC(bus
->last_resume_end_time
)));
/*
 * Bus-layer entry point: delegates interrupt-counter dumping to the
 * PCIe-specific implementation (dhd_pcie_intr_count_dump).
 */
10281 dhd_bus_intr_count_dump(dhd_pub_t
*dhd
)
10283 dhd_pcie_intr_count_dump(dhd
);
/*
 * Dumps the PCIe core DMA engine registers (H2D/D2H, TX/RX) by reading raw
 * core-register offsets through si_corereg(). Skips the dump entirely when
 * the PCIe link is down, since register reads would be invalid.
 * NOTE(review): the offsets (0x200..0x274) are hard-coded DMA register
 * offsets within the PCIe core; presumably XmtCtrl/XmtPtr etc. per the
 * core's register map — confirm against the chip register documentation.
 */
10287 dhd_pcie_dma_info_dump(dhd_pub_t
*dhd
)
/* Guard: never touch core registers over a dead link.
 * NOTE(review): the early 'return' after this message is elided here. */
10289 if (dhd
->bus
->is_linkdown
) {
10290 DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
10291 "due to PCIe link down ------- \r\n"));
10295 DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
/* Host-to-device transmit DMA registers (0x200..0x214). */
10298 DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
10299 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x200, 0, 0),
10300 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x204, 0, 0)));
10301 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
10302 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x208, 0, 0),
10303 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x20C, 0, 0)));
10304 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
10305 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x210, 0, 0),
10306 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x214, 0, 0)));
/* Host-to-device receive DMA registers (0x220..0x234). */
10308 DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
10309 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x220, 0, 0),
10310 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x224, 0, 0)));
10311 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
10312 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x228, 0, 0),
10313 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x22C, 0, 0)));
10314 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
10315 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x230, 0, 0),
10316 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x234, 0, 0)));
/* Device-to-host transmit DMA registers (0x240..0x254). */
10319 DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
10320 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x240, 0, 0),
10321 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x244, 0, 0)));
10322 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
10323 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x248, 0, 0),
10324 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x24C, 0, 0)));
10325 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
10326 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x250, 0, 0),
10327 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x254, 0, 0)));
/* Device-to-host receive DMA registers (0x260..0x274). */
10329 DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
10330 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x260, 0, 0),
10331 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x264, 0, 0)));
10332 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
10333 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x268, 0, 0),
10334 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x26C, 0, 0)));
10335 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
10336 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x270, 0, 0),
10337 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x274, 0, 0)));
/*
 * Dumps the PCIe mailbox interrupt status/mask and the D2H doorbell.
 * A read of 0xFFFFFFFF ((uint32)-1) from any register is treated as "bus
 * not accessible" (typical all-ones pattern for a dead PCIe link).
 * NOTE(review): the early-exit bodies after each all-ones check (likely
 * "return FALSE;") and the final return are elided from this fragment;
 * the caller (dhd_pcie_debug_info_dump) tests this function's result.
 */
10343 dhd_pcie_dump_int_regs(dhd_pub_t
*dhd
)
10345 uint32 intstatus
= 0;
10346 uint32 intmask
= 0;
10347 uint32 d2h_db0
= 0;
10348 uint32 d2h_mb_data
= 0;
10350 DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
10351 intstatus
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10352 dhd
->bus
->pcie_mailbox_int
, 0, 0);
/* All-ones read: register space unreadable — report and bail. */
10353 if (intstatus
== (uint32
)-1) {
10354 DHD_ERROR(("intstatus=0x%x \n", intstatus
));
10358 intmask
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10359 dhd
->bus
->pcie_mailbox_mask
, 0, 0);
10360 if (intmask
== (uint32
) -1) {
10361 DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus
, intmask
));
10365 d2h_db0
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10366 PCID2H_MailBox
, 0, 0);
10367 if (d2h_db0
== (uint32
)-1) {
10368 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
10369 intstatus
, intmask
, d2h_db0
));
/* All three registers read back sane values — print them plus the shared
 * D2H mailbox data word and the default interrupt mask. */
10373 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
10374 intstatus
, intmask
, d2h_db0
));
10375 dhd_bus_cmn_readshared(dhd
->bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
10376 DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data
,
10377 dhd
->bus
->def_intmask
));
/*
 * Dumps the Root Complex AER (Advanced Error Reporting) extended-capability
 * registers from PCIe config space: the Uncorrectable Error Status, and —
 * under EXTENDED_PCIE_DEBUG_DUMP — the four TLP header log words.
 * All reads go through dhdpcie_rc_access_cap(read=TRUE, write=FALSE).
 */
10383 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t
*dhd
)
10385 DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
10386 DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
10387 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
10388 PCIE_EXTCAP_AER_UCERR_OFFSET
, TRUE
, FALSE
, 0)));
10389 #ifdef EXTENDED_PCIE_DEBUG_DUMP
/* TLP header log captures the header of the TLP that triggered the error. */
10390 DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
10391 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
10392 PCIE_EXTCAP_ERR_HEADER_LOG_0
, TRUE
, FALSE
, 0),
10393 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
10394 PCIE_EXTCAP_ERR_HEADER_LOG_1
, TRUE
, FALSE
, 0),
10395 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
10396 PCIE_EXTCAP_ERR_HEADER_LOG_2
, TRUE
, FALSE
, 0),
10397 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
10398 PCIE_EXTCAP_ERR_HEADER_LOG_3
, TRUE
, FALSE
, 0)));
10399 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
/*
 * Top-level PCIe debug dump: aggregates host IRQ state, interrupt counters,
 * EP resource info, RC config space, EP config space, DAR registers, PCIe
 * core interrupt registers and DMA registers into the error log. Purely
 * diagnostic; all accesses are reads.
 * NOTE(review): this fragment elides several closing braces and the early
 * 'return' after the CUSTOMER_HW4_DEBUG link-down check — the full source
 * skips all register dumps when the link is down.
 */
10403 dhd_pcie_debug_info_dump(dhd_pub_t
*dhd
)
10405 int host_irq_disabled
;
10407 DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd
->bus
->bus_low_power_state
));
10408 host_irq_disabled
= dhdpcie_irq_disabled(dhd
->bus
);
10409 DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled
));
10410 dhd_print_tasklet_status(dhd
);
10411 dhd_pcie_intr_count_dump(dhd
);
10413 DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
10414 dhdpcie_dump_resource(dhd
->bus
);
10416 dhd_pcie_dump_rc_conf_space_cap(dhd
);
10418 DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
10419 dhd_debug_get_rc_linkcap(dhd
->bus
)));
10421 #ifdef CUSTOMER_HW4_DEBUG
/* Link-down guard: register reads below would return garbage. */
10422 if (dhd
->bus
->is_linkdown
) {
10423 DHD_ERROR(("Skip dumping the PCIe registers due to PCIe Link down\n"));
10426 #endif /* CUSTOMER_HW4_DEBUG */
/* --- Endpoint PCI config space --- */
10428 DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
10429 DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
10430 "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
10431 PCIECFGREG_STATUS_CMD
,
10432 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGREG_STATUS_CMD
, sizeof(uint32
)),
10433 PCIECFGREG_BASEADDR0
,
10434 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGREG_BASEADDR0
, sizeof(uint32
)),
10435 PCIECFGREG_BASEADDR1
,
10436 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGREG_BASEADDR1
, sizeof(uint32
)),
10438 dhd_pcie_config_read(dhd
->bus
->osh
, PCIE_CFG_PMCSR
, sizeof(uint32
))));
10439 DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
10440 "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
,
10441 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
,
10442 sizeof(uint32
)), PCIECFGGEN_DEV_STATUS_CTRL2
,
10443 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGGEN_DEV_STATUS_CTRL2
,
10444 sizeof(uint32
)), PCIECFGREG_PML1_SUB_CTRL1
,
10445 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGREG_PML1_SUB_CTRL1
,
10447 #ifdef EXTENDED_PCIE_DEBUG_DUMP
/* EP AER uncorrectable status + TLP header log (mirrors the RC dump). */
10448 DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
10449 dhdpcie_ep_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
10450 PCIE_EXTCAP_AER_UCERR_OFFSET
, TRUE
, FALSE
, 0)));
10451 DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
10452 "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1
,
10453 dhd_pcie_config_read(dhd
->bus
->osh
, PCI_TLP_HDR_LOG1
, sizeof(uint32
)),
10455 dhd_pcie_config_read(dhd
->bus
->osh
, PCI_TLP_HDR_LOG2
, sizeof(uint32
)),
10457 dhd_pcie_config_read(dhd
->bus
->osh
, PCI_TLP_HDR_LOG3
, sizeof(uint32
)),
10459 dhd_pcie_config_read(dhd
->bus
->osh
, PCI_TLP_HDR_LOG4
, sizeof(uint32
))));
/* Registers below only exist on PCIe core rev >= 24. */
10460 if (dhd
->bus
->sih
->buscorerev
>= 24) {
10461 DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
10462 "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL
,
10463 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGREG_DEV_STATUS_CTRL
,
10464 sizeof(uint32
)), PCIE_CFG_SUBSYSTEM_CONTROL
,
10465 dhd_pcie_config_read(dhd
->bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
,
10466 sizeof(uint32
)), PCIECFGREG_PML1_SUB_CTRL2
,
10467 dhd_pcie_config_read(dhd
->bus
->osh
, PCIECFGREG_PML1_SUB_CTRL2
,
/* --- DAR (Direct Access Register) window --- */
10470 DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n"));
10471 DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
10472 PCIDARClkCtl(dhd
->bus
->sih
->buscorerev
),
10473 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10474 PCIDARClkCtl(dhd
->bus
->sih
->buscorerev
), 0, 0),
10475 PCIDARPwrCtl(dhd
->bus
->sih
->buscorerev
),
10476 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10477 PCIDARPwrCtl(dhd
->bus
->sih
->buscorerev
), 0, 0),
10478 PCIDARH2D_DB0(dhd
->bus
->sih
->buscorerev
),
10479 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10480 PCIDARH2D_DB0(dhd
->bus
->sih
->buscorerev
), 0, 0)));
10482 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
/* Int-reg dump reports failure on all-ones reads; skip core dump then. */
10483 if (!dhd_pcie_dump_int_regs(dhd
)) {
10484 DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid int regs\n"));
10488 DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
10490 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10491 if (dhd
->bus
->sih
->buscorerev
>= 24) {
10492 DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x\n",
10493 PCIDARErrlog(dhd
->bus
->sih
->buscorerev
),
10494 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10495 PCIDARErrlog(dhd
->bus
->sih
->buscorerev
), 0, 0),
10496 PCIDARErrlog_Addr(dhd
->bus
->sih
->buscorerev
),
10497 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10498 PCIDARErrlog_Addr(dhd
->bus
->sih
->buscorerev
), 0, 0)));
10499 DHD_ERROR(("FunctionINtstatus(0x%x)=0x%x, Mailboxint(0x%x)=0x%x\n",
10500 PCIDARFunctionIntstatus(dhd
->bus
->sih
->buscorerev
),
10501 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10502 PCIDARFunctionIntstatus(dhd
->bus
->sih
->buscorerev
), 0, 0),
10503 PCIDARMailboxint(dhd
->bus
->sih
->buscorerev
),
10504 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10505 PCIDARMailboxint(dhd
->bus
->sih
->buscorerev
), 0, 0)));
10507 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
/* PHY clock-request debug registers. */
10509 DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
10510 "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0
,
10511 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ0
),
10512 PCIECFGREG_PHY_DBG_CLKREQ1
,
10513 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ1
),
10514 PCIECFGREG_PHY_DBG_CLKREQ2
,
10515 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ2
),
10516 PCIECFGREG_PHY_DBG_CLKREQ3
,
10517 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ3
)));
10519 #ifdef EXTENDED_PCIE_DEBUG_DUMP
10520 if (dhd
->bus
->sih
->buscorerev
>= 24) {
/* LTSSM state-transition history (link-training debug). */
10521 DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
10522 "ltssm_hist_2(0x%x)=0x%x "
10523 "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0
,
10524 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_0
),
10525 PCIECFGREG_PHY_LTSSM_HIST_1
,
10526 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_1
),
10527 PCIECFGREG_PHY_LTSSM_HIST_2
,
10528 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_2
),
10529 PCIECFGREG_PHY_LTSSM_HIST_3
,
10530 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_3
)));
10531 DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
10533 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, PCIE_CLK_CTRL
, 0, 0),
10535 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, PCIE_PWR_CTRL
, 0, 0),
10537 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10538 PCIH2D_MailBox
, 0, 0)));
10539 DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
10541 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_TREFUP
),
10542 PCIECFGREG_TREFUP_EXT
,
10543 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_TREFUP_EXT
)));
10544 DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
10545 "Function_Intstatus(0x%x)=0x%x "
10546 "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
10547 "Power_Intmask(0x%x)=0x%x\n",
10548 PCIE_CORE_REG_ERRLOG
,
10549 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10550 PCIE_CORE_REG_ERRLOG
, 0, 0),
10551 PCIE_CORE_REG_ERR_ADDR
,
10552 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10553 PCIE_CORE_REG_ERR_ADDR
, 0, 0),
10554 PCIFunctionIntstatus(dhd
->bus
->sih
->buscorerev
),
10555 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10556 PCIFunctionIntstatus(dhd
->bus
->sih
->buscorerev
), 0, 0),
10557 PCIFunctionIntmask(dhd
->bus
->sih
->buscorerev
),
10558 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10559 PCIFunctionIntmask(dhd
->bus
->sih
->buscorerev
), 0, 0),
10560 PCIPowerIntstatus(dhd
->bus
->sih
->buscorerev
),
10561 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10562 PCIPowerIntstatus(dhd
->bus
->sih
->buscorerev
), 0, 0),
10563 PCIPowerIntmask(dhd
->bus
->sih
->buscorerev
),
10564 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10565 PCIPowerIntmask(dhd
->bus
->sih
->buscorerev
), 0, 0)));
/* Core-internal error TLP header log (sbpcieregs_t layout). */
10566 DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
10567 "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
10568 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg1
),
10569 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10570 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg1
), 0, 0),
10571 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg2
),
10572 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10573 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg2
), 0, 0),
10574 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg3
),
10575 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10576 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg3
), 0, 0),
10577 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg4
),
10578 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10579 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg4
), 0, 0)));
10580 DHD_ERROR(("err_code(0x%x)=0x%x\n",
10581 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_code_logreg
),
10582 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
10583 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_code_logreg
), 0, 0)));
10585 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
/* Finally, the DMA engine register snapshot. */
10587 dhd_pcie_dma_info_dump(dhd
);
10593 * TLV ID for Host whitelist Region.
10595 #define BCM_NVRAM_WHTLST_SIGNATURE 0xFEED4B1Du
10598 * For the time being only one whitelist region supported and 64 Bit high and
10599 * 64 bit low address should be written.
10601 #define BCM_HOST_WHITELIST_NBYTES 16u
10603 /* Writes host whitelist region to the TCM. FW upon initialization reads this register
10604 * to fetch whitelist regions, and validate DMA descriptors before programming
10605 * against these whitelist regions.
/*
 * NOTE(review): elided fragment — return type, opening brace, the
 * declaration of 'ret', the return after the BCME_RANGE message and the
 * final return are not visible here.
 */
10608 dhdpcie_wrt_host_whitelist_region(struct dhd_bus
*bus
)
10611 bcm_host_whitelist_metadata_t whitelist_data
;
10612 uint8 whtlst_buff
[BCM_HOST_WHITELIST_NBYTES
];
/* Used only for sizeof() below, to mirror dhdpcie_wrt_rnd's TCM layout. */
10613 bcm_rand_metadata_t rnd_data
;
/*
 * TCM destination: below the NVRAM image, the entropy metadata and the
 * entropy bytes written by dhdpcie_wrt_rnd, leaving room for this
 * whitelist metadata block.
 */
10614 uint32 addr
= bus
->dongle_ram_base
+ (uint32
)((bus
->ramsize
- BCM_NVRAM_OFFSET_TCM
) -
10615 ((bus
->nvram_csm
& 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR
+ sizeof(rnd_data
) +
10616 BCM_ENTROPY_HOST_NBYTES
+ sizeof(whitelist_data
)));
10617 whitelist_data
.signature
= htol32(BCM_NVRAM_WHTLST_SIGNATURE
);
10618 whitelist_data
.count
= htol32(BCM_HOST_WHITELIST_NBYTES
);
/* NOTE(review): the count passed here has already been htol32()-converted;
 * on a big-endian host this would hand the API a byte-swapped length —
 * verify intended host endianness assumptions. */
10619 ret
= dhd_get_host_whitelist_region((void*)whtlst_buff
,
10620 whitelist_data
.count
);
/* BCME_RANGE from the host API means no whitelist region is configured. */
10621 if (ret
== BCME_RANGE
) {
10622 DHD_INFO(("%s: No Whitelist region programmed !\n",
10626 if (ret
== BCME_OK
) {
10627 /* write the metadata about whitelist region */
10628 ret
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&whitelist_data
,
10629 sizeof(whitelist_data
));
10630 if (ret
== BCME_OK
) {
10631 /* scale back by number of whitelist region counts */
10632 addr
-= BCM_HOST_WHITELIST_NBYTES
;
10634 /* Now write whitelist region(s) */
10635 ret
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, whtlst_buff
,
10636 BCM_HOST_WHITELIST_NBYTES
);
10643 dhd_bus_force_bt_quiesce_enabled(struct dhd_bus
*bus
)
10645 return bus
->force_bt_quiesce
;