2 * DHD Bus Module for PCIE
4 * Copyright (C) 1999-2019, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
25 * <<Broadcom-WL-IPTag/Open:>>
27 * $Id: dhd_pcie.c 797197 2018-12-29 03:31:21Z $
38 #include <hnd_debug.h>
40 #include <hnd_armtrap.h>
41 #if defined(DHD_DEBUG)
43 #endif /* defined(DHD_DEBUG) */
44 #include <dngl_stats.h>
45 #include <pcie_core.h>
48 #include <dhd_flowring.h>
49 #include <dhd_proto.h>
51 #include <dhd_debug.h>
52 #include <dhd_daemon.h>
55 #include <bcmmsgbuf.h>
59 #include <bcmendian.h>
60 #ifdef DHDTCPACK_SUPPRESS
62 #endif /* DHDTCPACK_SUPPRESS */
65 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66 #include <linux/pm_runtime.h>
67 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
69 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
71 #endif /* DEBUGGER || DHD_DSCOPE */
73 #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
75 #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
76 #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
78 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
79 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
80 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
82 /* CTO Prevention Recovery */
84 #define CTO_TO_CLEAR_WAIT_MS 10000
85 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
87 #define CTO_TO_CLEAR_WAIT_MS 1000
88 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
91 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
92 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
93 (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
95 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
96 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
97 (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
99 /* Fetch address of a member in the ring_mem structure in dongle memory */
100 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
101 (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
103 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
104 extern unsigned int system_rev
;
105 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
107 /* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
108 uint dma_ring_indices
= 0;
109 /* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
111 /* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
112 * defined in dhd_linux.c
114 bool force_trap_bad_h2d_phase
= 0;
116 int dhd_dongle_memsize
;
117 int dhd_dongle_ramsize
;
118 struct dhd_bus
*g_dhd_bus
= NULL
;
119 static int dhdpcie_checkdied(dhd_bus_t
*bus
, char *data
, uint size
);
120 static int dhdpcie_bus_readconsole(dhd_bus_t
*bus
);
121 #if defined(DHD_FW_COREDUMP)
122 static int dhdpcie_mem_dump(dhd_bus_t
*bus
);
123 #endif /* DHD_FW_COREDUMP */
125 static int dhdpcie_bus_membytes(dhd_bus_t
*bus
, bool write
, ulong address
, uint8
*data
, uint size
);
126 static int dhdpcie_bus_doiovar(dhd_bus_t
*bus
, const bcm_iovar_t
*vi
, uint32 actionid
,
127 const char *name
, void *params
,
128 int plen
, void *arg
, int len
, int val_size
);
129 static int dhdpcie_bus_lpback_req(struct dhd_bus
*bus
, uint32 intval
);
130 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus
*bus
,
131 uint32 len
, uint32 srcdelay
, uint32 destdelay
,
132 uint32 d11_lpbk
, uint32 core_num
, uint32 wait
);
133 static int dhdpcie_bus_download_state(dhd_bus_t
*bus
, bool enter
);
134 static int _dhdpcie_download_firmware(struct dhd_bus
*bus
);
135 static int dhdpcie_download_firmware(dhd_bus_t
*bus
, osl_t
*osh
);
136 static int dhdpcie_bus_write_vars(dhd_bus_t
*bus
);
137 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t
*bus
, uint32 intstatus
);
138 static bool dhdpci_bus_read_frames(dhd_bus_t
*bus
);
139 static int dhdpcie_readshared(dhd_bus_t
*bus
);
140 static void dhdpcie_init_shared_addr(dhd_bus_t
*bus
);
141 static bool dhdpcie_dongle_attach(dhd_bus_t
*bus
);
142 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t
*bus
, int mem_size
);
143 static void dhdpcie_bus_release_dongle(dhd_bus_t
*bus
, osl_t
*osh
,
144 bool dongle_isolation
, bool reset_flag
);
145 static void dhdpcie_bus_release_malloc(dhd_bus_t
*bus
, osl_t
*osh
);
146 static int dhdpcie_downloadvars(dhd_bus_t
*bus
, void *arg
, int len
);
147 static uint8
dhdpcie_bus_rtcm8(dhd_bus_t
*bus
, ulong offset
);
148 static void dhdpcie_bus_wtcm8(dhd_bus_t
*bus
, ulong offset
, uint8 data
);
149 static void dhdpcie_bus_wtcm16(dhd_bus_t
*bus
, ulong offset
, uint16 data
);
150 static uint16
dhdpcie_bus_rtcm16(dhd_bus_t
*bus
, ulong offset
);
151 static void dhdpcie_bus_wtcm32(dhd_bus_t
*bus
, ulong offset
, uint32 data
);
152 static uint32
dhdpcie_bus_rtcm32(dhd_bus_t
*bus
, ulong offset
);
153 #ifdef DHD_SUPPORT_64BIT
154 static void dhdpcie_bus_wtcm64(dhd_bus_t
*bus
, ulong offset
, uint64 data
) __attribute__ ((used
));
155 static uint64
dhdpcie_bus_rtcm64(dhd_bus_t
*bus
, ulong offset
) __attribute__ ((used
));
156 #endif /* DHD_SUPPORT_64BIT */
157 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t
*bus
, uint32 data
);
158 static void dhdpcie_bus_reg_unmap(osl_t
*osh
, volatile char *addr
, int size
);
159 static int dhdpcie_cc_nvmshadow(dhd_bus_t
*bus
, struct bcmstrbuf
*b
);
160 static void dhdpcie_fw_trap(dhd_bus_t
*bus
);
161 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t
*bus
, ring_info_t
*ring_info
);
162 extern void dhd_dpc_enable(dhd_pub_t
*dhdp
);
163 extern void dhd_dpc_kill(dhd_pub_t
*dhdp
);
165 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
166 static void dhdpcie_handle_mb_data(dhd_bus_t
*bus
);
167 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
169 #ifdef IDLE_TX_FLOW_MGMT
170 static void dhd_bus_check_idle_scan(dhd_bus_t
*bus
);
171 static void dhd_bus_idle_scan(dhd_bus_t
*bus
);
172 #endif /* IDLE_TX_FLOW_MGMT */
174 #ifdef EXYNOS_PCIE_DEBUG
175 extern void exynos_pcie_register_dump(int ch_num
);
176 #endif /* EXYNOS_PCIE_DEBUG */
178 #define PCI_VENDOR_ID_BROADCOM 0x14e4
180 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
181 #define MAX_D3_ACK_TIMEOUT 100
182 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
184 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
185 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version
, uint32 h_api_version
);
186 static void dhdpcie_cto_error_recovery(struct dhd_bus
*bus
);
188 static int dhdpcie_init_d11status(struct dhd_bus
*bus
);
190 static int dhdpcie_wrt_rnd(struct dhd_bus
*bus
);
192 extern uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t
*dhd
);
193 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t
*dhd
, uint16 max_txpost
);
195 static int dhdpcie_wrt_host_whitelist_region(struct dhd_bus
*bus
);
198 static int dhdpcie_sssr_dump(dhd_pub_t
*dhd
);
199 #endif /* DHD_SSSR_DUMP */
205 IOV_SET_DOWNLOAD_STATE
,
217 IOV_LTRSLEEPON_UNLOOAD
,
223 IOV_DUMP_RINGUPD_BLOCK
,
228 #ifdef DHD_PCIE_RUNTIMEPM
230 #endif /* DHD_PCIE_RUNTIMEPM */
237 IOV_H2D_ENABLE_TRAP_BADPHASE
,
238 IOV_H2D_TXPOST_MAX_ITEM
,
248 IOV_DNGL_CAPS
, /**< returns string with dongle capabilities */
249 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
250 IOV_GDB_SERVER
, /**< starts gdb server on given interface */
251 #endif /* DEBUGGER || DHD_DSCOPE */
255 IOV_MINIDUMP_OVERRIDE
,
256 #endif /* D2H_MINIDUMP */
257 IOV_PCIE_LAST
/**< unused IOVAR */
260 const bcm_iovar_t dhdpcie_iovars
[] = {
261 {"intr", IOV_INTR
, 0, 0, IOVT_BOOL
, 0 },
262 {"memsize", IOV_MEMSIZE
, 0, 0, IOVT_UINT32
, 0 },
263 {"dwnldstate", IOV_SET_DOWNLOAD_STATE
, 0, 0, IOVT_BOOL
, 0 },
264 {"vars", IOV_VARS
, 0, 0, IOVT_BUFFER
, 0 },
265 {"devreset", IOV_DEVRESET
, 0, 0, IOVT_UINT8
, 0 },
266 {"pcie_device_trap", IOV_FORCE_FW_TRAP
, 0, 0, 0, 0 },
267 {"pcie_lpbk", IOV_PCIE_LPBK
, 0, 0, IOVT_UINT32
, 0 },
268 {"cc_nvmshadow", IOV_CC_NVMSHADOW
, 0, 0, IOVT_BUFFER
, 0 },
269 {"ramsize", IOV_RAMSIZE
, 0, 0, IOVT_UINT32
, 0 },
270 {"ramstart", IOV_RAMSTART
, 0, 0, IOVT_UINT32
, 0 },
271 {"pcie_dmaxfer", IOV_PCIE_DMAXFER
, 0, 0, IOVT_BUFFER
, 3 * sizeof(int32
) },
272 {"pcie_suspend", IOV_PCIE_SUSPEND
, 0, 0, IOVT_UINT32
, 0 },
273 {"sleep_allowed", IOV_SLEEP_ALLOWED
, 0, 0, IOVT_BOOL
, 0 },
274 {"dngl_isolation", IOV_DONGLEISOLATION
, 0, 0, IOVT_UINT32
, 0 },
275 {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD
, 0, 0, IOVT_UINT32
, 0 },
276 {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK
, 0, 0, IOVT_BUFFER
, 0 },
277 {"dma_ring_indices", IOV_DMA_RINGINDICES
, 0, 0, IOVT_UINT32
, 0},
278 {"metadata_dbg", IOV_METADATA_DBG
, 0, 0, IOVT_BOOL
, 0 },
279 {"rx_metadata_len", IOV_RX_METADATALEN
, 0, 0, IOVT_UINT32
, 0 },
280 {"tx_metadata_len", IOV_TX_METADATALEN
, 0, 0, IOVT_UINT32
, 0 },
281 {"db1_for_mb", IOV_DB1_FOR_MB
, 0, 0, IOVT_UINT32
, 0 },
282 {"txp_thresh", IOV_TXP_THRESHOLD
, 0, 0, IOVT_UINT32
, 0 },
283 {"buzzz_dump", IOV_BUZZZ_DUMP
, 0, 0, IOVT_UINT32
, 0 },
284 {"flow_prio_map", IOV_FLOW_PRIO_MAP
, 0, 0, IOVT_UINT32
, 0 },
285 #ifdef DHD_PCIE_RUNTIMEPM
286 {"idletime", IOV_IDLETIME
, 0, 0, IOVT_INT32
, 0 },
287 #endif /* DHD_PCIE_RUNTIMEPM */
288 {"rxbound", IOV_RXBOUND
, 0, 0, IOVT_UINT32
, 0 },
289 {"txbound", IOV_TXBOUND
, 0, 0, IOVT_UINT32
, 0 },
290 {"fw_hang_report", IOV_HANGREPORT
, 0, 0, IOVT_BOOL
, 0 },
291 {"h2d_mb_data", IOV_H2D_MAILBOXDATA
, 0, 0, IOVT_UINT32
, 0 },
292 {"inforings", IOV_INFORINGS
, 0, 0, IOVT_UINT32
, 0 },
293 {"h2d_phase", IOV_H2D_PHASE
, 0, 0, IOVT_UINT32
, 0 },
294 {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE
, 0, 0,
296 {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM
, 0, 0, IOVT_UINT32
, 0 },
297 {"trap_data", IOV_TRAPDATA
, 0, 0, IOVT_BUFFER
, 0 },
298 {"trap_data_raw", IOV_TRAPDATA_RAW
, 0, 0, IOVT_BUFFER
, 0 },
299 {"cto_prevention", IOV_CTO_PREVENTION
, 0, 0, IOVT_UINT32
, 0 },
300 {"pcie_wd_reset", IOV_PCIE_WD_RESET
, 0, 0, IOVT_BOOL
, 0 },
301 {"dump_dongle", IOV_DUMP_DONGLE
, 0, 0, IOVT_BUFFER
,
302 MAX(sizeof(dump_dongle_in_t
), sizeof(dump_dongle_out_t
))},
303 {"clear_ring", IOV_CLEAR_RING
, 0, 0, IOVT_UINT32
, 0 },
304 {"idma_enable", IOV_IDMA_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
305 {"ifrm_enable", IOV_IFRM_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
306 {"dar_enable", IOV_DAR_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
307 {"cap", IOV_DNGL_CAPS
, 0, 0, IOVT_BUFFER
, 0},
308 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
309 {"gdb_server", IOV_GDB_SERVER
, 0, 0, IOVT_UINT32
, 0 },
310 #endif /* DEBUGGER || DHD_DSCOPE */
311 {"inb_dw_enable", IOV_INB_DW_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
312 {"cto_threshold", IOV_CTO_THRESHOLD
, 0, 0, IOVT_UINT32
, 0 },
314 {"minidump_override", IOV_MINIDUMP_OVERRIDE
, 0, 0, IOVT_UINT32
, 0 },
315 #endif /* D2H_MINIDUMP */
316 {NULL
, 0, 0, 0, 0, 0 }
319 #define MAX_READ_TIMEOUT 5 * 1000 * 1000
322 #define DHD_RXBOUND 64
325 #define DHD_TXBOUND 64
328 #define DHD_INFORING_BOUND 32
329 #define DHD_BTLOGRING_BOUND 32
331 uint dhd_rxbound
= DHD_RXBOUND
;
332 uint dhd_txbound
= DHD_TXBOUND
;
334 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
335 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
336 static struct dhd_gdb_bus_ops_s bus_ops
= {
337 .read_u16
= dhdpcie_bus_rtcm16
,
338 .read_u32
= dhdpcie_bus_rtcm32
,
339 .write_u32
= dhdpcie_bus_wtcm32
,
341 #endif /* DEBUGGER || DHD_DSCOPE */
344 dhd_bus_get_flr_force_fail(struct dhd_bus
*bus
)
346 return bus
->flr_force_fail
;
/*
 * Register/Unregister functions are called by the main DHD entry point (eg module insertion)
 * to link with the bus driver, in order to look for or await the device.
 */
int
dhd_bus_register(void)
{
	int err;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	err = dhdpcie_bus_register();
	return err;
}
/** Unlinks DHD from the PCIe bus driver; counterpart of dhd_bus_register(). */
void
dhd_bus_unregister(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhdpcie_bus_unregister();
}
370 /** returns a host virtual address */
372 dhdpcie_bus_reg_map(osl_t
*osh
, ulong addr
, int size
)
374 return (uint32
*)REG_MAP(addr
, size
);
/* Unmaps a register window previously mapped with dhdpcie_bus_reg_map().
 * NOTE(review): the function body is not visible in this fragment --
 * confirm against the full source.
 */
378 dhdpcie_bus_reg_unmap(osl_t
*osh
, volatile char *addr
, int size
)
385 * retrun H2D Doorbell registers address
386 * use DAR registers instead of enum register for corerev >= 23 (4347B0)
389 dhd_bus_db0_addr_get(struct dhd_bus
*bus
)
391 uint addr
= PCIH2D_MailBox
;
392 uint dar_addr
= DAR_PCIH2D_DB0_0(bus
->sih
->buscorerev
);
394 return ((DAR_ACTIVE(bus
->dhd
)) ? dar_addr
: addr
);
398 dhd_bus_db0_addr_2_get(struct dhd_bus
*bus
)
400 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB2_0(bus
->sih
->buscorerev
) : PCIH2D_MailBox_2
);
404 dhd_bus_db1_addr_get(struct dhd_bus
*bus
)
406 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB0_1(bus
->sih
->buscorerev
) : PCIH2D_DB1
);
410 dhd_bus_db1_addr_1_get(struct dhd_bus
*bus
)
412 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB1_1(bus
->sih
->buscorerev
) : PCIH2D_DB1_1
);
416 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus
*bus
)
421 * If multiple de-asserts, decrement ref and return
422 * Clear power request when only one pending
423 * so initial request is not removed unexpectedly
425 if (bus
->pwr_req_ref
> 1) {
430 ASSERT(bus
->pwr_req_ref
== 1);
432 if (MULTIBP_ENAB(bus
->sih
)) {
433 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
434 mask
= SRPWR_DMN1_ARMBPSD_MASK
;
436 mask
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
439 si_srpwr_request(bus
->sih
, mask
, 0);
440 bus
->pwr_req_ref
= 0;
444 dhd_bus_pcie_pwr_req_clear(struct dhd_bus
*bus
)
446 unsigned long flags
= 0;
448 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
449 _dhd_bus_pcie_pwr_req_clear_cmn(bus
);
450 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/** Lock-free variant of dhd_bus_pcie_pwr_req_clear(); caller must hold the lock. */
void
dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
}
460 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus
*bus
)
464 /* If multiple request entries, increment reference and return */
465 if (bus
->pwr_req_ref
> 0) {
470 ASSERT(bus
->pwr_req_ref
== 0);
472 if (MULTIBP_ENAB(bus
->sih
)) {
473 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
474 mask
= SRPWR_DMN1_ARMBPSD_MASK
;
475 val
= SRPWR_DMN1_ARMBPSD_MASK
;
477 mask
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
478 val
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
481 si_srpwr_request(bus
->sih
, mask
, val
);
483 bus
->pwr_req_ref
= 1;
487 dhd_bus_pcie_pwr_req(struct dhd_bus
*bus
)
489 unsigned long flags
= 0;
491 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
492 _dhd_bus_pcie_pwr_req_cmn(bus
);
493 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
497 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus
*bus
)
501 mask
= SRPWR_DMN_ALL_MASK
;
502 val
= SRPWR_DMN_ALL_MASK
;
504 si_srpwr_request(bus
->sih
, mask
, val
);
508 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus
*bus
)
510 unsigned long flags
= 0;
512 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
513 _dhd_bus_pcie_pwr_req_pd0123_cmn(bus
);
514 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
518 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(struct dhd_bus
*bus
)
522 mask
= SRPWR_DMN3_MACMAIN_MASK
| SRPWR_DMN2_MACAUX_MASK
;
524 si_srpwr_request(bus
->sih
, mask
, 0);
528 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus
*bus
)
530 unsigned long flags
= 0;
532 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
533 _dhd_bus_pcie_pwr_req_clear_pd23_cmn(bus
);
534 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/** Lock-free variant of dhd_bus_pcie_pwr_req(); caller must hold the lock. */
void
dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_cmn(bus);
}
544 dhdpcie_chip_support_msi(dhd_bus_t
*bus
)
546 DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
547 __FUNCTION__
, bus
->sih
->buscorerev
, si_chipid(bus
->sih
)));
548 if (bus
->sih
->buscorerev
<= 14 ||
549 si_chipid(bus
->sih
) == BCM4375_CHIP_ID
||
550 si_chipid(bus
->sih
) == BCM4361_CHIP_ID
||
551 si_chipid(bus
->sih
) == BCM4359_CHIP_ID
) {
559 * Called once for each hardware (dongle) instance that this DHD manages.
561 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
562 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
563 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
565 * 'tcm' is the *host* virtual address at which tcm is mapped.
567 int dhdpcie_bus_attach(osl_t
*osh
, dhd_bus_t
**bus_ptr
,
568 volatile char *regs
, volatile char *tcm
, void *pci_dev
)
570 dhd_bus_t
*bus
= NULL
;
573 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
576 if (!(bus
= MALLOCZ(osh
, sizeof(dhd_bus_t
)))) {
577 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__
));
578 ret
= BCME_NORESOURCE
;
585 /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
586 bus
->dev
= (struct pci_dev
*)pci_dev
;
588 dll_init(&bus
->flowring_active_list
);
589 #ifdef IDLE_TX_FLOW_MGMT
590 bus
->active_list_last_process_ts
= OSL_SYSUPTIME();
591 #endif /* IDLE_TX_FLOW_MGMT */
593 /* Attach pcie shared structure */
594 if (!(bus
->pcie_sh
= MALLOCZ(osh
, sizeof(pciedev_shared_t
)))) {
595 DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__
));
596 ret
= BCME_NORESOURCE
;
600 /* dhd_common_init(osh); */
602 if (dhdpcie_dongle_attach(bus
)) {
603 DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__
));
608 /* software resources */
609 if (!(bus
->dhd
= dhd_attach(osh
, bus
, PCMSGBUF_HDRLEN
))) {
610 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__
));
611 ret
= BCME_NORESOURCE
;
614 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
615 bus
->db1_for_mb
= TRUE
;
616 bus
->dhd
->hang_report
= TRUE
;
617 bus
->use_mailbox
= FALSE
;
618 bus
->use_d0_inform
= FALSE
;
619 bus
->intr_enabled
= FALSE
;
620 bus
->flr_force_fail
= FALSE
;
621 /* update the dma indices if set through module parameter. */
622 if (dma_ring_indices
!= 0) {
623 dhdpcie_set_dma_ring_indices(bus
->dhd
, dma_ring_indices
);
625 /* update h2d phase support if set through module parameter */
626 bus
->dhd
->h2d_phase_supported
= h2d_phase
? TRUE
: FALSE
;
627 /* update force trap on bad phase if set through module parameter */
628 bus
->dhd
->force_dongletrap_on_bad_h2d_phase
=
629 force_trap_bad_h2d_phase
? TRUE
: FALSE
;
630 #ifdef IDLE_TX_FLOW_MGMT
631 bus
->enable_idle_flowring_mgmt
= FALSE
;
632 #endif /* IDLE_TX_FLOW_MGMT */
633 bus
->irq_registered
= FALSE
;
635 #ifdef DHD_MSI_SUPPORT
636 bus
->d2h_intr_method
= enable_msi
&& dhdpcie_chip_support_msi(bus
) ?
637 PCIE_MSI
: PCIE_INTX
;
639 bus
->d2h_intr_method
= PCIE_INTX
;
640 #endif /* DHD_MSI_SUPPORT */
642 DHD_TRACE(("%s: EXIT SUCCESS\n",
649 DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__
));
651 if (bus
&& bus
->pcie_sh
) {
652 MFREE(osh
, bus
->pcie_sh
, sizeof(pciedev_shared_t
));
656 MFREE(osh
, bus
, sizeof(dhd_bus_t
));
/* Decides, per chip id, whether the CLM download can be skipped.
 * NOTE(review): the switch body is truncated in this fragment; only the
 * BCM4369 case label is visible and the return statements are missing --
 * confirm against the full source.
 */
663 dhd_bus_skip_clm(dhd_pub_t
*dhdp
)
665 switch (dhd_bus_chip_id(dhdp
)) {
666 case BCM4369_CHIP_ID
:
674 dhd_bus_chip(struct dhd_bus
*bus
)
676 ASSERT(bus
->sih
!= NULL
);
677 return bus
->sih
->chip
;
681 dhd_bus_chiprev(struct dhd_bus
*bus
)
684 ASSERT(bus
->sih
!= NULL
);
685 return bus
->sih
->chiprev
;
/* NOTE(review): the function body is missing from this fragment; presumably
 * returns the dhd_pub handle associated with the bus -- confirm against the
 * full source.
 */
689 dhd_bus_pub(struct dhd_bus
*bus
)
695 dhd_bus_sih(struct dhd_bus
*bus
)
697 return (void *)bus
->sih
;
/* NOTE(review): the function body is missing from this fragment; presumably
 * returns the bus transmit queue -- confirm against the full source.
 */
701 dhd_bus_txq(struct dhd_bus
*bus
)
706 /** Get Chip ID version */
707 uint
dhd_bus_chip_id(dhd_pub_t
*dhdp
)
709 dhd_bus_t
*bus
= dhdp
->bus
;
710 return bus
->sih
->chip
;
713 /** Get Chip Rev ID version */
714 uint
dhd_bus_chiprev_id(dhd_pub_t
*dhdp
)
716 dhd_bus_t
*bus
= dhdp
->bus
;
717 return bus
->sih
->chiprev
;
720 /** Get Chip Pkg ID version */
721 uint
dhd_bus_chippkg_id(dhd_pub_t
*dhdp
)
723 dhd_bus_t
*bus
= dhdp
->bus
;
724 return bus
->sih
->chippkg
;
727 /* Log the lastest DPC schedule time */
729 dhd_bus_set_dpc_sched_time(dhd_pub_t
*dhdp
)
731 dhdp
->bus
->dpc_sched_time
= OSL_LOCALTIME_NS();
734 /* Check if there is DPC scheduling errors */
/* Compares ISR/DPC timestamps to detect cases where the kernel failed to
 * schedule the DPC, then dumps all bus timestamps via DHD_ERROR.
 * NOTE(review): this fragment is missing the result variable, the branch
 * bodies, and the return statement -- confirm against the full source.
 */
736 dhd_bus_query_dpc_sched_errors(dhd_pub_t
*dhdp
)
738 dhd_bus_t
*bus
= dhdp
->bus
;
741 if (bus
->dpc_entry_time
< bus
->isr_exit_time
) {
742 /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
744 } else if (bus
->dpc_entry_time
< bus
->resched_dpc_time
) {
745 /* Kernel doesn't schedule the DPC after DHD tries to reschedule
746 * the DPC due to pending work items to be processed.
754 /* print out minimum timestamp info */
755 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
756 " isr_exit_time="SEC_USEC_FMT
757 " dpc_entry_time="SEC_USEC_FMT
758 "\ndpc_exit_time="SEC_USEC_FMT
759 " dpc_sched_time="SEC_USEC_FMT
760 " resched_dpc_time="SEC_USEC_FMT
"\n",
761 GET_SEC_USEC(bus
->isr_entry_time
),
762 GET_SEC_USEC(bus
->isr_exit_time
),
763 GET_SEC_USEC(bus
->dpc_entry_time
),
764 GET_SEC_USEC(bus
->dpc_exit_time
),
765 GET_SEC_USEC(bus
->dpc_sched_time
),
766 GET_SEC_USEC(bus
->resched_dpc_time
)));
772 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
774 dhdpcie_bus_intstatus(dhd_bus_t
*bus
)
776 uint32 intstatus
= 0;
779 if (bus
->bus_low_power_state
== DHD_BUS_D3_ACK_RECIEVED
) {
780 DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__
));
783 if ((bus
->sih
->buscorerev
== 6) || (bus
->sih
->buscorerev
== 4) ||
784 (bus
->sih
->buscorerev
== 2)) {
785 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
786 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, intstatus
);
789 /* this is a PCIE core register..not a config register... */
790 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, 0, 0);
792 /* this is a PCIE core register..not a config register... */
793 intmask
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_mask
, 0, 0);
794 /* Is device removed. intstatus & intmask read 0xffffffff */
795 if (intstatus
== (uint32
)-1 || intmask
== (uint32
)-1) {
796 DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__
));
797 DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
798 __FUNCTION__
, intstatus
, intmask
));
799 bus
->is_linkdown
= TRUE
;
800 dhd_pcie_debug_info_dump(bus
->dhd
);
801 #ifdef CUSTOMER_HW4_DEBUG
802 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
803 #ifdef SUPPORT_LINKDOWN_RECOVERY
804 #ifdef CONFIG_ARCH_MSM
805 bus
->no_cfg_restore
= 1;
806 #endif /* CONFIG_ARCH_MSM */
807 #endif /* SUPPORT_LINKDOWN_RECOVERY */
808 bus
->dhd
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN
;
809 dhd_os_send_hang_message(bus
->dhd
);
810 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
811 #endif /* CUSTOMER_HW4_DEBUG */
815 intstatus
&= intmask
;
818 * The fourth argument to si_corereg is the "mask" fields of the register to update
819 * and the fifth field is the "value" to update. Now if we are interested in only
820 * few fields of the "mask" bit map, we should not be writing back what we read
821 * By doing so, we might clear/ack interrupts that are not handled yet.
823 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, bus
->def_intmask
,
826 intstatus
&= bus
->def_intmask
;
833 * Name: dhdpcie_bus_isr
835 * 1: IN int irq -- interrupt vector
836 * 2: IN void *arg -- handle to private data structure
838 * Status (TRUE or FALSE)
841 * Interrupt Service routine checks for the status register,
842 * disable interrupt and queue DPC if mail box interrupts are raised.
845 dhdpcie_bus_isr(dhd_bus_t
*bus
)
847 uint32 intstatus
= 0;
850 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
851 /* verify argument */
853 DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__
));
857 if (bus
->dhd
->dongle_reset
) {
858 DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__
));
862 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
863 DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__
));
867 /* avoid processing of interrupts until msgbuf prot is inited */
868 if (!bus
->intr_enabled
) {
869 DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__
));
873 if (PCIECTO_ENAB(bus
)) {
874 /* read pci_intstatus */
875 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCI_INT_STATUS
, 4);
877 if (intstatus
& PCI_CTO_INT_MASK
) {
878 /* reset backplane and cto,
879 * then access through pcie is recovered.
881 dhdpcie_cto_error_recovery(bus
);
886 if (bus
->d2h_intr_method
== PCIE_MSI
) {
887 /* For MSI, as intstatus is cleared by firmware, no need to read */
888 goto skip_intstatus_read
;
891 intstatus
= dhdpcie_bus_intstatus(bus
);
893 /* Check if the interrupt is ours or not */
894 if (intstatus
== 0) {
895 DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__
));
896 bus
->non_ours_irq_count
++;
897 bus
->last_non_ours_irq_time
= OSL_LOCALTIME_NS();
901 /* save the intstatus */
902 /* read interrupt status register!! Status bits will be cleared in DPC !! */
903 bus
->intstatus
= intstatus
;
905 /* return error for 0xFFFFFFFF */
906 if (intstatus
== (uint32
)-1) {
907 DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
908 __FUNCTION__
, intstatus
));
909 dhdpcie_disable_irq_nosync(bus
);
914 /* Overall operation:
915 * - Mask further interrupts
916 * - Read/ack intstatus
917 * - Take action based on bits and state
918 * - Reenable interrupts (as per state)
921 /* Count the interrupt call */
926 bus
->isr_intr_disable_count
++;
928 /* For Linux, Macos etc (otherthan NDIS) instead of disabling
929 * dongle interrupt by clearing the IntMask, disable directly
930 * interrupt from the host side, so that host will not recieve
931 * any interrupts at all, even though dongle raises interrupts
933 dhdpcie_disable_irq_nosync(bus
); /* Disable interrupt!! */
937 #if defined(PCIE_ISR_THREAD)
939 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__
));
940 DHD_OS_WAKE_LOCK(bus
->dhd
);
941 while (dhd_bus_dpc(bus
));
942 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
944 bus
->dpc_sched
= TRUE
;
945 dhd_sched_dpc(bus
->dhd
); /* queue DPC now!! */
946 #endif /* defined(SDIO_ISR_THREAD) */
948 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__
));
953 DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__
));
958 dhdpcie_set_pwr_state(dhd_bus_t
*bus
, uint state
)
960 uint32 cur_state
= 0;
962 osl_t
*osh
= bus
->osh
;
964 pm_csr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
965 cur_state
= pm_csr
& PCIECFGREG_PM_CSR_STATE_MASK
;
967 if (cur_state
== state
) {
968 DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__
, cur_state
));
972 if (state
> PCIECFGREG_PM_CSR_STATE_D3_HOT
)
975 /* Validate the state transition
976 * if already in a lower power state, return error
978 if (state
!= PCIECFGREG_PM_CSR_STATE_D0
&&
979 cur_state
<= PCIECFGREG_PM_CSR_STATE_D3_COLD
&&
981 DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__
));
985 pm_csr
&= ~PCIECFGREG_PM_CSR_STATE_MASK
;
988 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
), pm_csr
);
990 /* need to wait for the specified mandatory pcie power transition delay time */
991 if (state
== PCIECFGREG_PM_CSR_STATE_D3_HOT
||
992 cur_state
== PCIECFGREG_PM_CSR_STATE_D3_HOT
)
993 OSL_DELAY(DHDPCIE_PM_D3_DELAY
);
994 else if (state
== PCIECFGREG_PM_CSR_STATE_D2
||
995 cur_state
== PCIECFGREG_PM_CSR_STATE_D2
)
996 OSL_DELAY(DHDPCIE_PM_D2_DELAY
);
998 /* read back the power state and verify */
999 pm_csr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
1000 cur_state
= pm_csr
& PCIECFGREG_PM_CSR_STATE_MASK
;
1001 if (cur_state
!= state
) {
1002 DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1003 __FUNCTION__
, cur_state
));
1006 DHD_ERROR(("%s: power transition to %u success \n",
1007 __FUNCTION__
, cur_state
));
1014 dhdpcie_config_check(dhd_bus_t
*bus
)
1017 int ret
= BCME_ERROR
;
1019 for (i
= 0; i
< DHDPCIE_CONFIG_CHECK_RETRY_COUNT
; i
++) {
1020 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCI_CFG_VID
, sizeof(uint32
));
1021 if ((val
& 0xFFFF) == VENDOR_BROADCOM
) {
1025 OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS
* 1000);
1032 dhdpcie_config_restore(dhd_bus_t
*bus
, bool restore_pmcsr
)
1035 osl_t
*osh
= bus
->osh
;
1037 if (BCME_OK
!= dhdpcie_config_check(bus
)) {
1041 for (i
= PCI_CFG_REV
>> 2; i
< DHDPCIE_CONFIG_HDR_SIZE
; i
++) {
1042 OSL_PCI_WRITE_CONFIG(osh
, i
<< 2, sizeof(uint32
), bus
->saved_config
.header
[i
]);
1044 OSL_PCI_WRITE_CONFIG(osh
, PCI_CFG_CMD
, sizeof(uint32
), bus
->saved_config
.header
[1]);
1047 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PM_CSR
,
1048 sizeof(uint32
), bus
->saved_config
.pmcsr
);
1050 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_CAP
, sizeof(uint32
), bus
->saved_config
.msi_cap
);
1051 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_ADDR_L
, sizeof(uint32
),
1052 bus
->saved_config
.msi_addr0
);
1053 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_ADDR_H
,
1054 sizeof(uint32
), bus
->saved_config
.msi_addr1
);
1055 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_DATA
,
1056 sizeof(uint32
), bus
->saved_config
.msi_data
);
1058 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_DEV_STATUS_CTRL
,
1059 sizeof(uint32
), bus
->saved_config
.exp_dev_ctrl_stat
);
1060 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGGEN_DEV_STATUS_CTRL2
,
1061 sizeof(uint32
), bus
->saved_config
.exp_dev_ctrl_stat2
);
1062 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_LINK_STATUS_CTRL
,
1063 sizeof(uint32
), bus
->saved_config
.exp_link_ctrl_stat
);
1064 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_LINK_STATUS_CTRL2
,
1065 sizeof(uint32
), bus
->saved_config
.exp_link_ctrl_stat2
);
1067 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL1
,
1068 sizeof(uint32
), bus
->saved_config
.l1pm0
);
1069 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL2
,
1070 sizeof(uint32
), bus
->saved_config
.l1pm1
);
1072 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR0_WIN
, sizeof(uint32
),
1073 bus
->saved_config
.bar0_win
);
1074 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR1_WIN
, sizeof(uint32
),
1075 bus
->saved_config
.bar1_win
);
1081 dhdpcie_config_save(dhd_bus_t
*bus
)
1084 osl_t
*osh
= bus
->osh
;
1086 if (BCME_OK
!= dhdpcie_config_check(bus
)) {
1090 for (i
= 0; i
< DHDPCIE_CONFIG_HDR_SIZE
; i
++) {
1091 bus
->saved_config
.header
[i
] = OSL_PCI_READ_CONFIG(osh
, i
<< 2, sizeof(uint32
));
1094 bus
->saved_config
.pmcsr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
1096 bus
->saved_config
.msi_cap
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_CAP
,
1098 bus
->saved_config
.msi_addr0
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_ADDR_L
,
1100 bus
->saved_config
.msi_addr1
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_ADDR_H
,
1102 bus
->saved_config
.msi_data
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_DATA
,
1105 bus
->saved_config
.exp_dev_ctrl_stat
= OSL_PCI_READ_CONFIG(osh
,
1106 PCIECFGREG_DEV_STATUS_CTRL
, sizeof(uint32
));
1107 bus
->saved_config
.exp_dev_ctrl_stat2
= OSL_PCI_READ_CONFIG(osh
,
1108 PCIECFGGEN_DEV_STATUS_CTRL2
, sizeof(uint32
));
1109 bus
->saved_config
.exp_link_ctrl_stat
= OSL_PCI_READ_CONFIG(osh
,
1110 PCIECFGREG_LINK_STATUS_CTRL
, sizeof(uint32
));
1111 bus
->saved_config
.exp_link_ctrl_stat2
= OSL_PCI_READ_CONFIG(osh
,
1112 PCIECFGREG_LINK_STATUS_CTRL2
, sizeof(uint32
));
1114 bus
->saved_config
.l1pm0
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL1
,
1116 bus
->saved_config
.l1pm1
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL2
,
1119 bus
->saved_config
.bar0_win
= OSL_PCI_READ_CONFIG(osh
, PCI_BAR0_WIN
,
1121 bus
->saved_config
.bar1_win
= OSL_PCI_READ_CONFIG(osh
, PCI_BAR1_WIN
,
1127 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
/* Global dhd_pub handle compiled in only for the Exynos PCIe link-down
 * recovery build; initialized elsewhere (not visible in this fragment).
 */
1128 dhd_pub_t
*link_recovery
= NULL
;
1129 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1132 dhdpcie_bus_intr_init(dhd_bus_t
*bus
)
1134 uint buscorerev
= bus
->sih
->buscorerev
;
1135 bus
->pcie_mailbox_int
= PCIMailBoxInt(buscorerev
);
1136 bus
->pcie_mailbox_mask
= PCIMailBoxMask(buscorerev
);
1137 bus
->d2h_mb_mask
= PCIE_MB_D2H_MB_MASK(buscorerev
);
1138 bus
->def_intmask
= PCIE_MB_D2H_MB_MASK(buscorerev
);
1139 if (buscorerev
< 64) {
1140 bus
->def_intmask
|= PCIE_MB_TOPCIE_FN0_0
| PCIE_MB_TOPCIE_FN0_1
;
1145 dhd_bus_aspm_enable_rc_ep(dhd_bus_t
*bus
, bool enable
)
1147 uint32 linkctrl_rc
, linkctrl_ep
;
1148 linkctrl_rc
= dhdpcie_rc_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1150 linkctrl_ep
= dhdpcie_ep_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1152 DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__
,
1153 (enable
? "ENABLE" : "DISABLE"), linkctrl_rc
, linkctrl_ep
));
1155 /* Enable only L1 ASPM (bit 1) first RC then EP */
1156 dhdpcie_rc_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1157 TRUE
, (linkctrl_rc
| PCIE_ASPM_L1_ENAB
));
1158 dhdpcie_ep_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1159 TRUE
, (linkctrl_ep
| PCIE_ASPM_L1_ENAB
));
1161 /* Disable complete ASPM (bit 1 and bit 0) first EP then RC */
1162 dhdpcie_ep_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1163 TRUE
, (linkctrl_ep
& (~PCIE_ASPM_ENAB
)));
1164 dhdpcie_rc_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1165 TRUE
, (linkctrl_rc
& (~PCIE_ASPM_ENAB
)));
1167 linkctrl_rc
= dhdpcie_rc_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1169 linkctrl_ep
= dhdpcie_ep_access_cap(bus
, PCIE_CAP_ID_EXP
, PCIE_CAP_LINKCTRL_OFFSET
, FALSE
,
1171 DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__
,
1172 (enable
? "ENABLE" : "DISABLE"), linkctrl_rc
, linkctrl_ep
));
1176 dhd_bus_l1ss_enable_rc_ep(dhd_bus_t
*bus
, bool enable
)
1178 uint32 l1ssctrl_rc
, l1ssctrl_ep
;
1180 /* Disable ASPM of RC and EP */
1181 dhd_bus_aspm_enable_rc_ep(bus
, FALSE
);
1183 /* Extendend Capacility Reg */
1184 l1ssctrl_rc
= dhdpcie_rc_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
,
1185 PCIE_EXTCAP_L1SS_CONTROL_OFFSET
, TRUE
, FALSE
, 0);
1186 l1ssctrl_ep
= dhdpcie_ep_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
,
1187 PCIE_EXTCAP_L1SS_CONTROL_OFFSET
, TRUE
, FALSE
, 0);
1188 DHD_ERROR(("%s: %s val before rc-0x%x:ep-0x%x\n", __FUNCTION__
,
1189 (enable
? "ENABLE" : "DISABLE"), l1ssctrl_rc
, l1ssctrl_ep
));
1191 /* Enable RC then EP */
1192 dhdpcie_rc_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
, PCIE_EXTCAP_L1SS_CONTROL_OFFSET
,
1193 TRUE
, TRUE
, (l1ssctrl_rc
| PCIE_EXT_L1SS_ENAB
));
1194 dhdpcie_ep_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
, PCIE_EXTCAP_L1SS_CONTROL_OFFSET
,
1195 TRUE
, TRUE
, (l1ssctrl_ep
| PCIE_EXT_L1SS_ENAB
));
1197 /* Disable EP then RC */
1198 dhdpcie_ep_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
, PCIE_EXTCAP_L1SS_CONTROL_OFFSET
,
1199 TRUE
, TRUE
, (l1ssctrl_ep
& (~PCIE_EXT_L1SS_ENAB
)));
1200 dhdpcie_rc_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
, PCIE_EXTCAP_L1SS_CONTROL_OFFSET
,
1201 TRUE
, TRUE
, (l1ssctrl_rc
& (~PCIE_EXT_L1SS_ENAB
)));
1203 l1ssctrl_rc
= dhdpcie_rc_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
,
1204 PCIE_EXTCAP_L1SS_CONTROL_OFFSET
, TRUE
, FALSE
, 0);
1205 l1ssctrl_ep
= dhdpcie_ep_access_cap(bus
, PCIE_EXTCAP_ID_L1SS
,
1206 PCIE_EXTCAP_L1SS_CONTROL_OFFSET
, TRUE
, FALSE
, 0);
1207 DHD_ERROR(("%s: %s val after rc-0x%x:ep-0x%x\n", __FUNCTION__
,
1208 (enable
? "ENABLE" : "DISABLE"), l1ssctrl_rc
, l1ssctrl_ep
));
1210 /* Enable ASPM of RC and EP */
1211 dhd_bus_aspm_enable_rc_ep(bus
, TRUE
);
1215 dhdpcie_dongle_reset(dhd_bus_t
*bus
)
1217 #ifndef DHD_USE_BP_RESET
1219 #endif /* !DHD_USE_BP_RESET */
1221 /* if the pcie link is down, watchdog reset
1222 * should not be done, as it may hang
1224 if (bus
->is_linkdown
) {
1228 #ifdef DHD_USE_BP_RESET
1229 dhd_bus_perform_bp_reset(bus
);
1231 wd_en
= (bus
->sih
->buscorerev
== 66) ? WD_SSRESET_PCIE_F0_EN
:
1232 (WD_SSRESET_PCIE_F0_EN
| WD_SSRESET_PCIE_ALL_FN_EN
);
1233 pcie_watchdog_reset(bus
->osh
, bus
->sih
, WD_ENABLE_MASK
, wd_en
);
1234 #endif /* DHD_USE_BP_RESET */
1238 dhdpcie_dongle_attach(dhd_bus_t
*bus
)
1240 osl_t
*osh
= bus
->osh
;
1241 volatile void *regsva
= (volatile void*)bus
->regs
;
1244 sbpcieregs_t
*sbpcieregs
;
1245 bool dongle_isolation
;
1247 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
1249 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1250 link_recovery
= bus
->dhd
;
1251 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1253 bus
->alp_only
= TRUE
;
1256 /* Checking PCIe bus status with reading configuration space */
1257 val
= OSL_PCI_READ_CONFIG(osh
, PCI_CFG_VID
, sizeof(uint32
));
1258 if ((val
& 0xFFFF) != VENDOR_BROADCOM
) {
1259 DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__
));
1262 devid
= (val
>> 16) & 0xFFFF;
1263 bus
->cl_devid
= devid
;
1265 /* Set bar0 window to si_enum_base */
1266 dhdpcie_bus_cfg_set_bar0_win(bus
, si_enum_base(devid
));
1269 * Checking PCI_SPROM_CONTROL register for preventing invalid address access
1270 * due to switch address space from PCI_BUS to SI_BUS.
1272 val
= OSL_PCI_READ_CONFIG(osh
, PCI_SPROM_CONTROL
, sizeof(uint32
));
1273 if (val
== 0xffffffff) {
1274 DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__
));
1278 /* si_attach() will provide an SI handle and scan the backplane */
1279 if (!(bus
->sih
= si_attach((uint
)devid
, osh
, regsva
, PCI_BUS
, bus
,
1280 &bus
->vars
, &bus
->varsz
))) {
1281 DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__
));
1285 if (MULTIBP_ENAB(bus
->sih
) && (bus
->sih
->buscorerev
>= 66)) {
1286 DHD_ERROR(("Enable CTO\n"));
1287 bus
->cto_enable
= TRUE
;
1288 dhdpcie_cto_init(bus
, bus
->cto_enable
);
1290 * HW JIRA - CRWLPCIEGEN2-672
1291 * Producer Index Feature which is used by F1 gets reset on F0 FLR
1294 if (bus
->sih
->buscorerev
== 66) {
1295 dhdpcie_ssreset_dis_enum_rst(bus
);
1298 /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
1299 * dhdpcie_bus_release_dongle() --> si_detach()
1300 * dhdpcie_dongle_attach() --> si_attach()
1302 bus
->pwr_req_ref
= 0;
1305 if (MULTIBP_ENAB(bus
->sih
)) {
1306 dhd_bus_pcie_pwr_req_nolock(bus
);
1309 /* Olympic EFI requirement - stop driver load if FW is already running
1310 * need to do this here before pcie_watchdog_reset, because
1311 * pcie_watchdog_reset will put the ARM back into halt state
1313 if (!dhdpcie_is_arm_halted(bus
)) {
1314 DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
1319 BCM_REFERENCE(dongle_isolation
);
1321 /* Dongle reset during power on can be invoked in case of module type driver */
1322 if (dhd_download_fw_on_driverload
) {
1323 /* Enable CLKREQ# */
1324 dhdpcie_clkreq(bus
->osh
, 1, 1);
1327 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
1328 * without checking dongle_isolation flag, but if it is called via some other path
1329 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
1332 if (bus
->dhd
== NULL
) {
1333 /* dhd_attach not yet happened, do watchdog reset */
1334 dongle_isolation
= FALSE
;
1336 dongle_isolation
= bus
->dhd
->dongle_isolation
;
1339 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
1340 * This is required to avoid spurious interrupts to the Host and bring back
1341 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
1343 if (dongle_isolation
== FALSE
) {
1344 dhdpcie_dongle_reset(bus
);
1349 si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
1350 sbpcieregs
= (sbpcieregs_t
*)(bus
->regs
);
1352 /* WAR where the BAR1 window may not be sized properly */
1353 W_REG(osh
, &sbpcieregs
->configaddr
, 0x4e0);
1354 val
= R_REG(osh
, &sbpcieregs
->configdata
);
1355 W_REG(osh
, &sbpcieregs
->configdata
, val
);
1357 /* Get info on the ARM and SOCRAM cores... */
1358 /* Should really be qualified by device id */
1359 if ((si_setcore(bus
->sih
, ARM7S_CORE_ID
, 0)) ||
1360 (si_setcore(bus
->sih
, ARMCM3_CORE_ID
, 0)) ||
1361 (si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0)) ||
1362 (si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0))) {
1363 bus
->armrev
= si_corerev(bus
->sih
);
1365 DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__
));
1369 if (si_setcore(bus
->sih
, SYSMEM_CORE_ID
, 0)) {
1370 /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
1373 if (!bus
->ramsize_adjusted
) {
1374 if (!(bus
->orig_ramsize
= si_sysmem_size(bus
->sih
))) {
1375 DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__
));
1378 switch ((uint16
)bus
->sih
->chip
) {
1380 /* also populate base address */
1381 bus
->dongle_ram_base
= CA7_4365_RAM_BASE
;
1382 bus
->orig_ramsize
= 0x1c0000; /* Reserve 1.75MB for CA7 */
1386 } else if (!si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0)) {
1387 if (!(bus
->orig_ramsize
= si_socram_size(bus
->sih
))) {
1388 DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__
));
1392 /* cr4 has a different way to find the RAM size from TCM's */
1393 if (!(bus
->orig_ramsize
= si_tcm_size(bus
->sih
))) {
1394 DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__
));
1397 /* also populate base address */
1398 switch ((uint16
)bus
->sih
->chip
) {
1399 case BCM4339_CHIP_ID
:
1400 case BCM4335_CHIP_ID
:
1401 bus
->dongle_ram_base
= CR4_4335_RAM_BASE
;
1403 case BCM4358_CHIP_ID
:
1404 case BCM4354_CHIP_ID
:
1405 case BCM43567_CHIP_ID
:
1406 case BCM43569_CHIP_ID
:
1407 case BCM4350_CHIP_ID
:
1408 case BCM43570_CHIP_ID
:
1409 bus
->dongle_ram_base
= CR4_4350_RAM_BASE
;
1411 case BCM4360_CHIP_ID
:
1412 bus
->dongle_ram_base
= CR4_4360_RAM_BASE
;
1415 case BCM4364_CHIP_ID
:
1416 bus
->dongle_ram_base
= CR4_4364_RAM_BASE
;
1420 bus
->dongle_ram_base
= (bus
->sih
->chiprev
< 6) /* changed at 4345C0 */
1421 ? CR4_4345_LT_C0_RAM_BASE
: CR4_4345_GE_C0_RAM_BASE
;
1424 bus
->dongle_ram_base
= CR4_43602_RAM_BASE
;
1426 case BCM4349_CHIP_GRPID
:
1427 /* RAM based changed from 4349c0(revid=9) onwards */
1428 bus
->dongle_ram_base
= ((bus
->sih
->chiprev
< 9) ?
1429 CR4_4349_RAM_BASE
: CR4_4349_RAM_BASE_FROM_REV_9
);
1431 case BCM4347_CHIP_ID
:
1432 case BCM4357_CHIP_ID
:
1433 case BCM4361_CHIP_ID
:
1434 bus
->dongle_ram_base
= CR4_4347_RAM_BASE
;
1436 case BCM4375_CHIP_ID
:
1437 case BCM4369_CHIP_ID
:
1438 bus
->dongle_ram_base
= CR4_4369_RAM_BASE
;
1441 bus
->dongle_ram_base
= 0;
1442 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1443 __FUNCTION__
, bus
->dongle_ram_base
));
1446 bus
->ramsize
= bus
->orig_ramsize
;
1447 if (dhd_dongle_memsize
)
1448 dhdpcie_bus_dongle_setmemsize(bus
, dhd_dongle_memsize
);
1450 if (bus
->ramsize
> DONGLE_TCM_MAP_SIZE
) {
1451 DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1452 __FUNCTION__
, bus
->ramsize
, bus
->ramsize
));
1456 DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1457 bus
->ramsize
, bus
->orig_ramsize
, bus
->dongle_ram_base
));
1459 bus
->srmemsize
= si_socram_srmem_size(bus
->sih
);
1461 dhdpcie_bus_intr_init(bus
);
1463 /* Set the poll and/or interrupt flags */
1464 bus
->intr
= (bool)dhd_intr
;
1465 #ifdef DHD_DISABLE_ASPM
1466 dhd_bus_aspm_enable_rc_ep(bus
, FALSE
);
1467 #endif /* DHD_DISABLE_ASPM */
1469 bus
->idma_enabled
= TRUE
;
1470 bus
->ifrm_enabled
= TRUE
;
1471 DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__
));
1473 if (MULTIBP_ENAB(bus
->sih
)) {
1474 dhd_bus_pcie_pwr_req_clear_nolock(bus
);
1477 bus
->force_bt_quiesce
= TRUE
;
1482 if (bus
->sih
!= NULL
) {
1483 if (MULTIBP_ENAB(bus
->sih
)) {
1484 dhd_bus_pcie_pwr_req_clear_nolock(bus
);
1486 /* for EFI even if there is an error, load still succeeds
1487 * so si_detach should not be called here, it is called during unload
1489 si_detach(bus
->sih
);
1492 DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__
));
1497 dhpcie_bus_unmask_interrupt(dhd_bus_t
*bus
)
1499 dhdpcie_bus_cfg_write_dword(bus
, PCIIntmask
, 4, I_MB
);
1503 dhpcie_bus_mask_interrupt(dhd_bus_t
*bus
)
1505 dhdpcie_bus_cfg_write_dword(bus
, PCIIntmask
, 4, 0x0);
1509 /* Non atomic function, caller should hold appropriate lock */
1511 dhdpcie_bus_intr_enable(dhd_bus_t
*bus
)
1513 DHD_TRACE(("%s Enter\n", __FUNCTION__
));
1514 if (bus
&& bus
->sih
&& !bus
->is_linkdown
) {
1515 /* Skip after recieving D3 ACK */
1516 if (bus
->bus_low_power_state
== DHD_BUS_D3_ACK_RECIEVED
) {
1519 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
1520 (bus
->sih
->buscorerev
== 4)) {
1521 dhpcie_bus_unmask_interrupt(bus
);
1523 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_mask
,
1524 bus
->def_intmask
, bus
->def_intmask
);
1527 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1530 /* Non atomic function, caller should hold appropriate lock */
1532 dhdpcie_bus_intr_disable(dhd_bus_t
*bus
)
1534 DHD_TRACE(("%s Enter\n", __FUNCTION__
));
1535 if (bus
&& bus
->sih
&& !bus
->is_linkdown
) {
1536 /* Skip after recieving D3 ACK */
1537 if (bus
->bus_low_power_state
== DHD_BUS_D3_ACK_RECIEVED
) {
1540 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
1541 (bus
->sih
->buscorerev
== 4)) {
1542 dhpcie_bus_mask_interrupt(bus
);
1544 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_mask
,
1545 bus
->def_intmask
, 0);
1548 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1552 * dhdpcie_advertise_bus_cleanup advertises that clean up is under progress
1553 * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts
1554 * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for
1555 * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
1556 * they will exit from there itself without marking dhd_bus_busy_state as BUSY.
1559 dhdpcie_advertise_bus_cleanup(dhd_pub_t
*dhdp
)
1561 unsigned long flags
;
1564 #ifdef DHD_PCIE_RUNTIMEPM
1565 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, dhdpcie_advertise_bus_cleanup
);
1566 #endif /* DHD_PCIE_RUNTIMEPM */
1568 dhdp
->dhd_watchdog_ms_backup
= dhd_watchdog_ms
;
1569 if (dhdp
->dhd_watchdog_ms_backup
) {
1570 DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
1572 dhd_os_wd_timer(dhdp
, 0);
1574 if (dhdp
->busstate
!= DHD_BUS_DOWN
) {
1575 DHD_GENERAL_LOCK(dhdp
, flags
);
1576 dhdp
->busstate
= DHD_BUS_DOWN_IN_PROGRESS
;
1577 DHD_GENERAL_UNLOCK(dhdp
, flags
);
1580 timeleft
= dhd_os_busbusy_wait_negation(dhdp
, &dhdp
->dhd_bus_busy_state
);
1581 if ((timeleft
== 0) || (timeleft
== 1)) {
1582 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1583 __FUNCTION__
, dhdp
->dhd_bus_busy_state
));
1591 dhdpcie_bus_remove_prep(dhd_bus_t
*bus
)
1593 unsigned long flags
;
1594 DHD_TRACE(("%s Enter\n", __FUNCTION__
));
1596 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
1597 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
1598 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
1600 dhd_os_sdlock(bus
->dhd
);
1602 if (bus
->sih
&& !bus
->dhd
->dongle_isolation
) {
1603 if (bus
->sih
->buscorerev
== 66) {
1604 dhd_bus_pcie_pwr_req_reload_war(bus
);
1607 /* Has insmod fails after rmmod issue in Brix Android */
1609 /* if the pcie link is down, watchdog reset
1610 * should not be done, as it may hang
1613 if (!bus
->is_linkdown
) {
1614 dhdpcie_dongle_reset(bus
);
1617 bus
->dhd
->is_pcie_watchdog_reset
= TRUE
;
1620 dhd_os_sdunlock(bus
->dhd
);
1622 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1626 dhd_init_bus_lock(dhd_bus_t
*bus
)
1628 if (!bus
->bus_lock
) {
1629 bus
->bus_lock
= dhd_os_spin_lock_init(bus
->dhd
->osh
);
1634 dhd_deinit_bus_lock(dhd_bus_t
*bus
)
1636 if (bus
->bus_lock
) {
1637 dhd_os_spin_lock_deinit(bus
->dhd
->osh
, bus
->bus_lock
);
1638 bus
->bus_lock
= NULL
;
1642 /** Detach and free everything */
1644 dhdpcie_bus_release(dhd_bus_t
*bus
)
1646 bool dongle_isolation
= FALSE
;
1648 unsigned long flags_bus
;
1650 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
1658 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
1660 #endif /* DEBUGGER || DHD_DSCOPE */
1661 dhdpcie_advertise_bus_cleanup(bus
->dhd
);
1662 dongle_isolation
= bus
->dhd
->dongle_isolation
;
1663 bus
->dhd
->is_pcie_watchdog_reset
= FALSE
;
1664 dhdpcie_bus_remove_prep(bus
);
1667 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
1668 dhdpcie_bus_intr_disable(bus
);
1669 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
1670 dhdpcie_free_irq(bus
);
1672 dhd_deinit_bus_lock(bus
);
1674 * dhdpcie_bus_release_dongle free bus->sih handle, which is needed to
1675 * access Dongle registers.
1676 * dhd_detach will communicate with dongle to delete flowring ..etc.
1677 * So dhdpcie_bus_release_dongle should be called only after the dhd_detach.
1679 dhd_detach(bus
->dhd
);
1680 dhdpcie_bus_release_dongle(bus
, osh
, dongle_isolation
, TRUE
);
1684 /* unmap the regs and tcm here!! */
1686 dhdpcie_bus_reg_unmap(osh
, bus
->regs
, DONGLE_REG_MAP_SIZE
);
1690 dhdpcie_bus_reg_unmap(osh
, bus
->tcm
, DONGLE_TCM_MAP_SIZE
);
1694 dhdpcie_bus_release_malloc(bus
, osh
);
1695 /* Detach pcie shared structure */
1697 MFREE(osh
, bus
->pcie_sh
, sizeof(pciedev_shared_t
));
1698 bus
->pcie_sh
= NULL
;
1701 if (bus
->console
.buf
!= NULL
) {
1702 MFREE(osh
, bus
->console
.buf
, bus
->console
.bufsize
);
1705 /* Finally free bus info */
1706 MFREE(osh
, bus
, sizeof(dhd_bus_t
));
1711 DHD_TRACE(("%s: Exit\n", __FUNCTION__
));
1712 } /* dhdpcie_bus_release */
1715 dhdpcie_bus_release_dongle(dhd_bus_t
*bus
, osl_t
*osh
, bool dongle_isolation
, bool reset_flag
)
1717 DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__
,
1718 bus
->dhd
, bus
->dhd
->dongle_reset
));
1720 if ((bus
->dhd
&& bus
->dhd
->dongle_reset
) && reset_flag
) {
1721 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1725 if (bus
->is_linkdown
) {
1726 DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__
));
1732 if (!dongle_isolation
&&
1733 (bus
->dhd
&& !bus
->dhd
->is_pcie_watchdog_reset
)) {
1734 dhdpcie_dongle_reset(bus
);
1737 if (bus
->ltrsleep_on_unload
) {
1738 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
1739 OFFSETOF(sbpcieregs_t
, u
.pcie2
.ltr_state
), ~0, 0);
1742 if (bus
->sih
->buscorerev
== 13)
1743 pcie_serdes_iddqdisable(bus
->osh
, bus
->sih
,
1744 (sbpcieregs_t
*) bus
->regs
);
1746 if (dhd_download_fw_on_driverload
) {
1747 /* Disable CLKREQ# */
1748 dhdpcie_clkreq(bus
->osh
, 1, 0);
1751 if (bus
->sih
!= NULL
) {
1752 si_detach(bus
->sih
);
1755 if (bus
->vars
&& bus
->varsz
)
1756 MFREE(osh
, bus
->vars
, bus
->varsz
);
1760 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1764 dhdpcie_bus_cfg_read_dword(dhd_bus_t
*bus
, uint32 addr
, uint32 size
)
1766 uint32 data
= OSL_PCI_READ_CONFIG(bus
->osh
, addr
, size
);
1770 /** 32 bit config write */
1772 dhdpcie_bus_cfg_write_dword(dhd_bus_t
*bus
, uint32 addr
, uint32 size
, uint32 data
)
1774 OSL_PCI_WRITE_CONFIG(bus
->osh
, addr
, size
, data
);
1778 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t
*bus
, uint32 data
)
1780 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR0_WIN
, 4, data
);
1784 dhdpcie_bus_dongle_setmemsize(struct dhd_bus
*bus
, int mem_size
)
1786 int32 min_size
= DONGLE_MIN_MEMSIZE
;
1787 /* Restrict the memsize to user specified limit */
1788 DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
1789 dhd_dongle_memsize
, min_size
));
1790 if ((dhd_dongle_memsize
> min_size
) &&
1791 (dhd_dongle_memsize
< (int32
)bus
->orig_ramsize
))
1792 bus
->ramsize
= dhd_dongle_memsize
;
1796 dhdpcie_bus_release_malloc(dhd_bus_t
*bus
, osl_t
*osh
)
1798 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
1800 if (bus
->dhd
&& bus
->dhd
->dongle_reset
)
1803 if (bus
->vars
&& bus
->varsz
) {
1804 MFREE(osh
, bus
->vars
, bus
->varsz
);
1808 DHD_TRACE(("%s: Exit\n", __FUNCTION__
));
1813 /** Stop bus module: clear pending frames, disable data flow */
1814 void dhd_bus_stop(struct dhd_bus
*bus
, bool enforce_mutex
)
1816 unsigned long flags
, flags_bus
;
1818 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
1823 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
1824 DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__
));
1828 DHD_DISABLE_RUNTIME_PM(bus
->dhd
);
1830 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
1831 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
1832 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
1834 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1835 atomic_set(&bus
->dhd
->block_bus
, TRUE
);
1836 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1838 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
1839 dhdpcie_bus_intr_disable(bus
);
1840 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
1842 if (!bus
->is_linkdown
) {
1844 status
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
1845 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, status
);
1848 if (!dhd_download_fw_on_driverload
) {
1849 dhd_dpc_kill(bus
->dhd
);
1852 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1853 pm_runtime_disable(dhd_bus_to_dev(bus
));
1854 pm_runtime_set_suspended(dhd_bus_to_dev(bus
));
1855 pm_runtime_enable(dhd_bus_to_dev(bus
));
1856 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1858 /* Clear rx control and wake any waiters */
1859 dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT
);
1860 dhd_wakeup_ioctl_event(bus
->dhd
, IOCTL_RETURN_ON_BUS_STOP
);
1867 * Watchdog timer function.
1868 * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
1870 bool dhd_bus_watchdog(dhd_pub_t
*dhd
)
1872 unsigned long flags
;
1873 dhd_bus_t
*bus
= dhd
->bus
;
1875 DHD_GENERAL_LOCK(dhd
, flags
);
1876 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd
) ||
1877 DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd
)) {
1878 DHD_GENERAL_UNLOCK(dhd
, flags
);
1881 DHD_BUS_BUSY_SET_IN_WD(dhd
);
1882 DHD_GENERAL_UNLOCK(dhd
, flags
);
1884 #ifdef DHD_PCIE_RUNTIMEPM
1885 dhdpcie_runtime_bus_wake(dhd
, TRUE
, __builtin_return_address(0));
1886 #endif /* DHD_PCIE_RUNTIMEPM */
1888 /* Poll for console output periodically */
1889 if (dhd
->busstate
== DHD_BUS_DATA
&&
1890 dhd
->dhd_console_ms
!= 0 &&
1891 bus
->bus_low_power_state
== DHD_BUS_NO_LOW_POWER_STATE
) {
1892 bus
->console
.count
+= dhd_watchdog_ms
;
1893 if (bus
->console
.count
>= dhd
->dhd_console_ms
) {
1894 bus
->console
.count
-= dhd
->dhd_console_ms
;
1896 if (MULTIBP_ENAB(bus
->sih
)) {
1897 dhd_bus_pcie_pwr_req(bus
);
1900 /* Make sure backplane clock is on */
1901 if (dhdpcie_bus_readconsole(bus
) < 0) {
1902 dhd
->dhd_console_ms
= 0; /* On error, stop trying */
1905 if (MULTIBP_ENAB(bus
->sih
)) {
1906 dhd_bus_pcie_pwr_req_clear(bus
);
1911 DHD_GENERAL_LOCK(dhd
, flags
);
1912 DHD_BUS_BUSY_CLEAR_IN_WD(dhd
);
1913 dhd_os_busbusy_wake(dhd
);
1914 DHD_GENERAL_UNLOCK(dhd
, flags
);
1917 } /* dhd_bus_watchdog */
1919 #if defined(SUPPORT_MULTIPLE_REVISION)
1920 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
1921 defined(SUPPORT_BCM4359_MIXED_MODULES)
1922 #define VENDOR_MURATA "murata"
1923 #define VENDOR_WISOL "wisol"
1924 #define VNAME_DELIM "_"
1925 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
1927 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
1929 #define MAX_EXTENSION 20
1930 #define MODULE_BCM4361_INDEX 3
1931 #define CHIP_REV_A0 1
1932 #define CHIP_REV_A1 2
1933 #define CHIP_REV_B0 3
1934 #define CHIP_REV_B1 4
1935 #define CHIP_REV_B2 5
1936 #define CHIP_REV_C0 6
1937 #define BOARD_TYPE_EPA 0x080f
1938 #define BOARD_TYPE_IPA 0x0827
1939 #define BOARD_TYPE_IPA_OLD 0x081a
1940 #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
1941 #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
1942 #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
1943 #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
1944 #define MAX_VID_LEN 8
1945 #define CIS_TUPLE_HDR_LEN 2
1946 #define CIS_TUPLE_START_ADDRESS 0x18011110
1947 #define CIS_TUPLE_END_ADDRESS 0x18011167
1948 #define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
1949 + 1) / sizeof(uint32))
1950 #define CIS_TUPLE_TAG_START 0x80
1951 #define CIS_TUPLE_TAG_VENDOR 0x81
1952 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b
1953 #define CIS_TUPLE_TAG_LENGTH 1
1954 #define NVRAM_FEM_MURATA "_murata"
1955 #define CID_FEM_MURATA "_mur_"
1957 typedef struct cis_tuple_format
{
1959 uint8 len
; /* total length of tag and data */
1962 } cis_tuple_format_t
;
1965 char cid_ext
[MAX_EXTENSION
];
1966 char nvram_ext
[MAX_EXTENSION
];
1967 char fw_ext
[MAX_EXTENSION
];
1970 naming_info_t bcm4361_naming_table
[] = {
1971 { {""}, {""}, {""} },
1972 { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
1973 { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
1974 { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
1975 { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
1976 { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
1977 { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
1978 { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
1979 { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
1980 { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
1981 { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
1982 { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
1983 { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
1984 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
1985 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
1986 { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
1987 { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
1988 { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
1989 { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
1990 { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
1991 { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
1992 { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
1993 { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
1994 { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
1995 { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
1996 { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
1997 { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
1998 { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
1999 { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2000 { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2001 { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2002 { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2003 { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
2004 { {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
2005 { {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
2008 static naming_info_t
*
2009 dhd_find_naming_info(naming_info_t table
[], int table_size
, char *module_type
)
2011 int index_found
= 0, i
= 0;
2013 if (module_type
&& strlen(module_type
) > 0) {
2014 for (i
= 1; i
< table_size
; i
++) {
2015 if (!strncmp(table
[i
].cid_ext
, module_type
, strlen(table
[i
].cid_ext
))) {
2022 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__
, index_found
));
2024 return &table
[index_found
];
2027 static naming_info_t
*
2028 dhd_find_naming_info_by_cid(naming_info_t table
[], int table_size
,
2031 int index_found
= 0, i
= 0;
2034 /* truncate extension */
2035 for (i
= 1, ptr
= cid_info
; i
< MODULE_BCM4361_INDEX
&& ptr
; i
++) {
2036 ptr
= bcmstrstr(ptr
, "_");
2042 for (i
= 1; i
< table_size
&& ptr
; i
++) {
2043 if (!strncmp(table
[i
].cid_ext
, ptr
, strlen(table
[i
].cid_ext
))) {
2049 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__
, index_found
));
2051 return &table
[index_found
];
2055 dhd_parse_board_information_bcm4361(dhd_bus_t
*bus
, int *boardtype
,
2056 unsigned char *vid
, int *vid_length
)
2058 int boardtype_backplane_addr
[] = {
2059 0x18010324, /* OTP Control 1 */
2060 0x18012618, /* PMU min resource mask */
2062 int boardtype_backplane_data
[] = {
2064 0x0e4fffff /* Keep on ARMHTAVAIL */
2066 int int_val
= 0, i
= 0;
2067 cis_tuple_format_t
*tuple
;
2069 uint32 raw_data
[CIS_TUPLE_MAX_COUNT
];
2071 for (i
= 0; i
< ARRAYSIZE(boardtype_backplane_addr
); i
++) {
2072 /* Write new OTP and PMU configuration */
2073 if (si_backplane_access(bus
->sih
, boardtype_backplane_addr
[i
], sizeof(int),
2074 &boardtype_backplane_data
[i
], FALSE
) != BCME_OK
) {
2075 DHD_ERROR(("invalid size/addr combination\n"));
2079 if (si_backplane_access(bus
->sih
, boardtype_backplane_addr
[i
], sizeof(int),
2080 &int_val
, TRUE
) != BCME_OK
) {
2081 DHD_ERROR(("invalid size/addr combination\n"));
2085 DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2086 __FUNCTION__
, boardtype_backplane_addr
[i
], int_val
));
2089 /* read tuple raw data */
2090 for (i
= 0; i
< CIS_TUPLE_MAX_COUNT
; i
++) {
2091 if (si_backplane_access(bus
->sih
, CIS_TUPLE_START_ADDRESS
+ i
* sizeof(uint32
),
2092 sizeof(uint32
), &raw_data
[i
], TRUE
) != BCME_OK
) {
2097 totlen
= i
* sizeof(uint32
);
2098 tuple
= (cis_tuple_format_t
*)raw_data
;
2100 /* check the first tuple has tag 'start' */
2101 if (tuple
->id
!= CIS_TUPLE_TAG_START
) {
2105 *vid_length
= *boardtype
= 0;
2107 /* find tagged parameter */
2108 while ((totlen
>= (tuple
->len
+ CIS_TUPLE_HDR_LEN
)) &&
2109 (*vid_length
== 0 || *boardtype
== 0)) {
2112 if ((tuple
->tag
== CIS_TUPLE_TAG_VENDOR
) &&
2113 (totlen
>= (int)(len
+ CIS_TUPLE_HDR_LEN
))) {
2115 memcpy(vid
, tuple
->data
, tuple
->len
- CIS_TUPLE_TAG_LENGTH
);
2116 *vid_length
= tuple
->len
- CIS_TUPLE_TAG_LENGTH
;
2117 prhex("OTP VID", tuple
->data
, tuple
->len
- CIS_TUPLE_TAG_LENGTH
);
2119 else if ((tuple
->tag
== CIS_TUPLE_TAG_BOARDTYPE
) &&
2120 (totlen
>= (int)(len
+ CIS_TUPLE_HDR_LEN
))) {
2121 /* found boardtype */
2122 *boardtype
= (int)tuple
->data
[0];
2123 prhex("OTP boardtype", tuple
->data
, tuple
->len
- CIS_TUPLE_TAG_LENGTH
);
2126 tuple
= (cis_tuple_format_t
*)((uint8
*)tuple
+ (len
+ CIS_TUPLE_HDR_LEN
));
2127 totlen
-= (len
+ CIS_TUPLE_HDR_LEN
);
2130 if (*vid_length
<= 0 || *boardtype
<= 0) {
2131 DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2132 *vid_length
, *boardtype
));
2140 static naming_info_t
*
2141 dhd_find_naming_info_by_chip_rev(naming_info_t table
[], int table_size
,
2142 dhd_bus_t
*bus
, bool *is_murata_fem
)
2144 int board_type
= 0, chip_rev
= 0, vid_length
= 0;
2145 unsigned char vid
[MAX_VID_LEN
];
2146 naming_info_t
*info
= &table
[0];
2147 char *cid_info
= NULL
;
2149 if (!bus
|| !bus
->sih
) {
2150 DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__
, bus
));
2153 chip_rev
= bus
->sih
->chiprev
;
2155 if (dhd_parse_board_information_bcm4361(bus
, &board_type
, vid
, &vid_length
)
2157 DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__
));
2161 DHD_INFO(("%s:chip version %d\n", __FUNCTION__
, chip_rev
));
2163 /* A0 chipset has exception only */
2164 if (chip_rev
== CHIP_REV_A0
) {
2165 if (board_type
== BOARD_TYPE_EPA
) {
2166 info
= dhd_find_naming_info(table
, table_size
,
2167 DEFAULT_CIDINFO_FOR_EPA
);
2168 } else if ((board_type
== BOARD_TYPE_IPA
) ||
2169 (board_type
== BOARD_TYPE_IPA_OLD
)) {
2170 info
= dhd_find_naming_info(table
, table_size
,
2171 DEFAULT_CIDINFO_FOR_IPA
);
2174 cid_info
= dhd_get_cid_info(vid
, vid_length
);
2176 info
= dhd_find_naming_info_by_cid(table
, table_size
, cid_info
);
2177 if (strstr(cid_info
, CID_FEM_MURATA
)) {
2178 *is_murata_fem
= TRUE
;
2185 #endif /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */
/*
 * Append a BCM4358 chip-revision suffix (e.g. "_a0"/"_a1"/"_a3", optionally
 * "_b85" module tag and "_r10" board-rev tag) to the caller's firmware and
 * nvram path strings, based on dhd_bus_chiprev(bus).
 * NOTE(review): strcat() onto fw_path/nv_path assumes the caller's buffers
 * have spare room for the tag -- confirm at the call sites.
 */
2187 static int concate_revision_bcm4358(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2190 #if defined(SUPPORT_MULTIPLE_CHIPS)
2191 char chipver_tag
[20] = "_4358";
2193 char chipver_tag
[10] = {0, };
2194 #endif /* SUPPORT_MULTIPLE_CHIPS */
2196 chiprev
= dhd_bus_chiprev(bus
);
/* chiprev 0 -> A0 tag */
2198 DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2199 strcat(chipver_tag
, "_a0");
2200 } else if (chiprev
== 1) {
2201 DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
/* A1 tag only appended for multi-chip / multi-module-CIS builds */
2202 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2203 strcat(chipver_tag
, "_a1");
2204 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2205 } else if (chiprev
== 3) {
2206 DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2207 #if defined(SUPPORT_MULTIPLE_CHIPS)
2208 strcat(chipver_tag
, "_a3");
2209 #endif /* SUPPORT_MULTIPLE_CHIPS */
2211 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev
));
2214 strcat(fw_path
, chipver_tag
);
2216 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
/* For A1/A3 modules, consult the CIS to decide whether the nvram
 * needs the "_b85" module suffix (A1 with no b85a module detected).
 */
2217 if (chiprev
== 1 || chiprev
== 3) {
2218 int ret
= dhd_check_module_b85a();
2219 if ((chiprev
== 1) && (ret
< 0)) {
2220 memset(chipver_tag
, 0x00, sizeof(chipver_tag
));
2221 strcat(chipver_tag
, "_b85");
2222 strcat(chipver_tag
, "_a1");
2226 DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__
, chipver_tag
));
2227 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2229 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
/* Board revision >= 10 gets an extra "_r10" nvram suffix */
2230 if (system_rev
>= 10) {
2231 DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev
))
2232 strcat(chipver_tag
, "_r10");
2234 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2235 strcat(nv_path
, chipver_tag
);
/*
 * Append a BCM4359 chip-revision suffix ("_b0"/"_b1"/"_c0", plus optional
 * vendor and "_plus" tags) to the firmware and nvram paths, based on
 * bus->sih->chiprev and (in mixed-module builds) the detected module vendor.
 * NOTE(review): strncat(dst, s, strlen(s)) bounds the copy by the SOURCE
 * length, not the remaining destination space, so it gives no overflow
 * protection beyond plain strcat -- confirm buffer sizes at call sites.
 */
2240 static int concate_revision_bcm4359(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2243 char chipver_tag
[10] = {0, };
2244 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2245 defined(SUPPORT_BCM4359_MIXED_MODULES)
2246 char chipver_tag_nv
[20] = {0, };
2247 int module_type
= -1;
2248 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2250 chip_ver
= bus
->sih
->chiprev
;
2251 if (chip_ver
== 4) {
2252 DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2253 strncat(chipver_tag
, "_b0", strlen("_b0"));
2254 } else if (chip_ver
== 5) {
2255 DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2256 strncat(chipver_tag
, "_b1", strlen("_b1"));
2257 } else if (chip_ver
== 9) {
2258 DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2259 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2260 defined(SUPPORT_BCM4359_MIXED_MODULES)
/* Prefix the nvram tag with the detected module vendor name */
2261 if (dhd_check_module(VENDOR_MURATA
)) {
2262 strncat(chipver_tag_nv
, VNAME_DELIM
, strlen(VNAME_DELIM
));
2263 strncat(chipver_tag_nv
, VENDOR_MURATA
, strlen(VENDOR_MURATA
));
2264 } else if (dhd_check_module(VENDOR_WISOL
)) {
2265 strncat(chipver_tag_nv
, VNAME_DELIM
, strlen(VNAME_DELIM
));
2266 strncat(chipver_tag_nv
, VENDOR_WISOL
, strlen(VENDOR_WISOL
));
2268 /* For a SEMCO module, no extra vendor string needs to be added */
2269 strncat(chipver_tag_nv
, "_c0", strlen("_c0"));
2270 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2271 strncat(chipver_tag
, "_c0", strlen("_c0"));
2272 #if defined(CONFIG_WLAN_GRACE) || defined(CONFIG_SEC_GRACEQLTE_PROJECT) || \
2273 defined(CONFIG_SEC_LYKANLTE_PROJECT) || defined(CONFIG_SEC_KELLYLTE_PROJECT)
2274 DHD_ERROR(("----- Adding _plus string -----\n"));
2275 strncat(chipver_tag
, "_plus", strlen("_plus"));
2276 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2277 defined(SUPPORT_BCM4359_MIXED_MODULES)
2278 strncat(chipver_tag_nv
, "_plus", strlen("_plus"));
2279 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2280 #endif /* CONFIG_WLAN_GRACE || CONFIG_SEC_GRACEQLTE_PROJECT || CONFIG_SEC_LYKANLTE_PROJECT ||
2281 * CONFIG_SEC_KELLYLTE_PROJECT
2284 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver
));
2288 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2289 defined(SUPPORT_BCM4359_MIXED_MODULES)
/* Mixed-module builds: pick fw/nvram suffixes by detected module type */
2290 module_type
= dhd_check_module_b90();
2292 switch (module_type
) {
2293 case BCM4359_MODULE_TYPE_B90B
:
2294 strcat(fw_path
, chipver_tag
);
2296 case BCM4359_MODULE_TYPE_B90S
:
2297 strcat(fw_path
, chipver_tag
);
/* Only add the vendor-qualified nvram tag if the path does not
 * already carry a vendor name.
 */
2298 if (!(strstr(nv_path
, VENDOR_MURATA
) || strstr(nv_path
, VENDOR_WISOL
))) {
2299 strcat(nv_path
, chipver_tag_nv
);
2301 strcat(nv_path
, chipver_tag
);
2306 * .cid.info file does not exist:
2307 * force-load the B90S FW for initial MFG boot-up.
2309 if (chip_ver
== 5) {
2310 strncat(fw_path
, "_b90s", strlen("_b90s"));
2312 strcat(fw_path
, chipver_tag
);
2313 strcat(nv_path
, chipver_tag
);
2316 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2317 strcat(fw_path
, chipver_tag
);
2318 strcat(nv_path
, chipver_tag
);
2319 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
/*
 * Append BCM4361 firmware/nvram filename extensions.  In mixed-module
 * builds the extension is looked up from bcm4361_naming_table, keyed by the
 * module type read from the CIS (or by chip revision if the .cid.info file
 * is absent); a "_murata" FEM suffix may be inserted before the nvram
 * extension.  Outside those builds only an (empty) chipver tag is appended.
 */
2324 concate_revision_bcm4361(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2327 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2328 char module_type
[MAX_VNAME_LEN
];
2329 naming_info_t
*info
= NULL
;
2330 bool is_murata_fem
= FALSE
;
2332 memset(module_type
, 0, sizeof(module_type
));
2334 if (dhd_check_module_bcm4361(module_type
,
2335 MODULE_BCM4361_INDEX
, &is_murata_fem
) == BCME_OK
) {
2336 info
= dhd_find_naming_info(bcm4361_naming_table
,
2337 ARRAYSIZE(bcm4361_naming_table
), module_type
);
2339 /* in case the .cid.info file does not exist */
2340 info
= dhd_find_naming_info_by_chip_rev(bcm4361_naming_table
,
2341 ARRAYSIZE(bcm4361_naming_table
), bus
, &is_murata_fem
);
/* Don't add the murata FEM suffix twice if the path already has it */
2344 if (bcmstrnstr(nv_path
, PATH_MAX
, "_murata", 7)) {
2345 is_murata_fem
= FALSE
;
2349 if (is_murata_fem
) {
2350 strncat(nv_path
, NVRAM_FEM_MURATA
, strlen(NVRAM_FEM_MURATA
));
2352 strncat(nv_path
, info
->nvram_ext
, strlen(info
->nvram_ext
));
2353 strncat(fw_path
, info
->fw_ext
, strlen(info
->fw_ext
));
2355 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__
));
2358 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2359 char chipver_tag
[10] = {0, };
2361 strcat(fw_path
, chipver_tag
);
2362 strcat(nv_path
, chipver_tag
);
2363 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
/*
 * Dispatch to the chip-specific revision-suffix routine based on
 * si_chipid(bus->sih).  Validates bus/sih and both path pointers first.
 * Chips with no revision-specific naming fall through to the default log.
 */
2369 concate_revision(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2373 if (!bus
|| !bus
->sih
) {
2374 DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__
));
2378 if (!fw_path
|| !nv_path
) {
2379 DHD_ERROR(("fw_path or nv_path is null.\n"));
2383 switch (si_chipid(bus
->sih
)) {
2385 case BCM43569_CHIP_ID
:
2386 case BCM4358_CHIP_ID
:
2387 res
= concate_revision_bcm4358(bus
, fw_path
, nv_path
);
2389 case BCM4355_CHIP_ID
:
2390 case BCM4359_CHIP_ID
:
2391 res
= concate_revision_bcm4359(bus
, fw_path
, nv_path
);
2393 case BCM4361_CHIP_ID
:
2394 case BCM4347_CHIP_ID
:
2395 res
= concate_revision_bcm4361(bus
, fw_path
, nv_path
);
2398 DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2404 #endif /* SUPPORT_MULTIPLE_REVISION */
/*
 * Return the chip id of the dongle as a uint16, read via si_chipid() when
 * the bus and its si handle are valid.  (The fallback return for an invalid
 * bus is outside this extract.)
 */
2407 dhd_get_chipid(dhd_pub_t
*dhd
)
2409 dhd_bus_t
*bus
= dhd
->bus
;
2411 if (bus
&& bus
->sih
)
2412 return (uint16
)si_chipid(bus
->sih
);
2418 * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
2420 * BCM_REQUEST_FW specific :
2421 * Given the chip type, determines the file paths to be used within /lib/firmware/brcm/ containing
2422 * the firmware and NVM for that chip. If the download fails, retries the download with a different NVM file
2424 * BCMEMBEDIMAGE specific:
2425 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2426 * file will be used instead.
2428 * @return BCME_OK on success
/*
 * Public entry point: record the caller-supplied firmware/nvram paths on the
 * bus, optionally append chip-revision suffixes, then hand off to
 * dhdpcie_download_firmware().  Returns its result (BCME_OK on success).
 * NOTE(review): bus->fw_path/nv_path keep pointing at the caller's buffers;
 * the caller owns their lifetime.
 */
2431 dhd_bus_download_firmware(struct dhd_bus
*bus
, osl_t
*osh
,
2432 char *pfw_path
, char *pnv_path
)
2436 bus
->fw_path
= pfw_path
;
2437 bus
->nv_path
= pnv_path
;
2439 #if defined(SUPPORT_MULTIPLE_REVISION)
/* Append chip/board revision tags to both paths before download */
2440 if (concate_revision(bus
, bus
->fw_path
, bus
->nv_path
) != 0) {
2441 DHD_ERROR(("%s: fail to concatnate revison \n",
2445 #endif /* SUPPORT_MULTIPLE_REVISION */
2447 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2448 dhd_set_blob_support(bus
->dhd
, bus
->fw_path
);
2449 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2451 DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
2452 __FUNCTION__
, bus
->fw_path
, bus
->nv_path
));
2453 dhdpcie_dump_resource(bus
);
2455 ret
= dhdpcie_download_firmware(bus
, osh
);
2461 * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2463 * BCM_REQUEST_FW specific :
2464 * Given the chip type, determines the file paths to be used within /lib/firmware/brcm/ containing
2465 * the firmware and NVM for that chip. If the download fails, retries the download with a different NVM file
2467 * BCMEMBEDIMAGE specific:
2468 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2469 * file will be used instead.
2471 * @return BCME_OK on success
/*
 * Download firmware into the dongle.  Under BCM_REQUEST_FW the fw/nvram
 * file names under /lib/firmware/brcm/ are derived from the chip id and
 * revision; otherwise bus->fw_path is used as-is.  Holds an OS wake lock
 * around the actual download done by _dhdpcie_download_firmware().
 * NOTE(review): under BCM_REQUEST_FW, bus->fw_path/nv_path are pointed at
 * the stack-local fw_path[]/nv_path[] buffers below, which dangle once this
 * function returns -- confirm no later code dereferences them.
 */
2474 dhdpcie_download_firmware(struct dhd_bus
*bus
, osl_t
*osh
)
2477 #if defined(BCM_REQUEST_FW)
2478 uint chipid
= bus
->sih
->chip
;
2479 uint revid
= bus
->sih
->chiprev
;
2480 char fw_path
[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
2481 char nv_path
[64]; /* path to nvram vars file */
2482 bus
->fw_path
= fw_path
;
2483 bus
->nv_path
= nv_path
;
/* Build the chip/revision-specific base name, e.g. ".../bcm43570a2" */
2485 case BCM43570_CHIP_ID
:
2486 bcmstrncat(fw_path
, "43570", 5);
2489 bcmstrncat(fw_path
, "a0", 2);
2492 bcmstrncat(fw_path
, "a2", 2);
2495 DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__
,
2501 DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__
,
2505 /* load board specific nvram file */
2506 snprintf(bus
->nv_path
, sizeof(nv_path
), "%s.nvm", fw_path
);
2508 snprintf(bus
->fw_path
, sizeof(fw_path
), "%s-firmware.bin", fw_path
);
2509 #endif /* BCM_REQUEST_FW */
/* Keep the host awake for the duration of the download */
2511 DHD_OS_WAKE_LOCK(bus
->dhd
);
2512 ret
= _dhdpcie_download_firmware(bus
);
2514 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
2516 } /* dhdpcie_download_firmware */
2518 #define DHD_MEMORY_SET_PATTERN 0xAA
2521 * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
2522 * is updated with the event logging partitions within that file as well.
2524 * @param pfw_path Path to .bin or .bea file
/*
 * Stream the firmware image file (pfw_path) into dongle RAM in
 * MEMBLOCK-sized chunks via dhdpcie_bus_membytes().  The first word of the
 * image (offset 0 on a CR4/CA7 core) is latched as the reset instruction.
 * Under DHD_FW_MEM_CORRUPTION in MFG mode the whole image is also kept in a
 * host-side copy (p_org_fw), read back from the dongle after download, and
 * memcmp-verified chunk by chunk.  Returns a BCME_* code; all buffers and
 * the image handle are released on every exit path.
 */
2527 dhdpcie_download_code_file(struct dhd_bus
*bus
, char *pfw_path
)
2529 int bcmerror
= BCME_ERROR
;
2531 #if defined(DHD_FW_MEM_CORRUPTION)
2532 uint8
*p_org_fw
= NULL
;
2533 uint32 org_fw_size
= 0;
2534 uint32 fw_write_offset
= 0;
2535 #endif /* DHD_FW_MEM_CORRUPTION */
2538 char *imgbuf
= NULL
;
2539 uint8
*memblock
= NULL
, *memptr
;
2540 int offset_end
= bus
->ramsize
;
2542 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__
, pfw_path
));
2544 /* Should succeed in opening image if it is actually given through registry
2545 * entry or in module param.
2547 imgbuf
= dhd_os_open_image1(bus
->dhd
, pfw_path
);
2548 if (imgbuf
== NULL
) {
/* Chunk buffer is over-allocated so memptr can be DHD_SDALIGN-aligned */
2552 memptr
= memblock
= MALLOC(bus
->dhd
->osh
, MEMBLOCK
+ DHD_SDALIGN
);
2553 if (memblock
== NULL
) {
2554 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__
, MEMBLOCK
));
2555 bcmerror
= BCME_NOMEM
;
2558 if ((uint32
)(uintptr
)memblock
% DHD_SDALIGN
) {
2559 memptr
+= (DHD_SDALIGN
- ((uint32
)(uintptr
)memblock
% DHD_SDALIGN
));
2562 #if defined(DHD_FW_MEM_CORRUPTION)
/* MFG mode: allocate a host-side shadow of the full image for the
 * post-download readback comparison.
 */
2563 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
2564 org_fw_size
= dhd_os_get_image_size(imgbuf
);
2565 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2566 p_org_fw
= (uint8
*)DHD_OS_PREALLOC(bus
->dhd
,
2567 DHD_PREALLOC_MEMDUMP_RAM
, org_fw_size
);
2569 p_org_fw
= (uint8
*)VMALLOC(bus
->dhd
->osh
, org_fw_size
);
2570 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
2571 if (p_org_fw
== NULL
) {
2572 DHD_ERROR(("%s: Failed to allocate memory %d bytes for download check\n",
2573 __FUNCTION__
, org_fw_size
));
2574 bcmerror
= BCME_NOMEM
;
2577 memset(p_org_fw
, 0, org_fw_size
);
2580 #endif /* DHD_FW_MEM_CORRUPTION */
2582 /* check if CR4/CA7 */
2583 store_reset
= (si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0) ||
2584 si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0));
2585 /* Download image with MEMBLOCK size */
2586 while ((len
= dhd_os_get_image_block((char*)memptr
, MEMBLOCK
, imgbuf
))) {
2588 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__
, len
));
2589 bcmerror
= BCME_ERROR
;
2592 /* if address is 0, store the reset instruction to be written in 0 */
2594 ASSERT(offset
== 0);
2595 bus
->resetinstr
= *(((uint32
*)memptr
));
2596 /* Add start of RAM address to the address given by user */
2597 offset
+= bus
->dongle_ram_base
;
2598 offset_end
+= offset
;
2599 store_reset
= FALSE
;
2602 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, offset
, (uint8
*)memptr
, len
);
2604 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
2605 __FUNCTION__
, bcmerror
, MEMBLOCK
, offset
));
2609 #if defined(DHD_FW_MEM_CORRUPTION)
/* Mirror each downloaded chunk into the host-side shadow copy */
2610 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
2611 memcpy((p_org_fw
+ fw_write_offset
), memptr
, len
);
2612 fw_write_offset
+= len
;
2614 #endif /* DHD_FW_MEM_CORRUPTION */
/* Guard against writing past the end of dongle RAM */
2616 if (offset
>= offset_end
) {
2617 DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
2618 __FUNCTION__
, offset
, offset_end
));
2619 bcmerror
= BCME_ERROR
;
2623 #ifdef DHD_FW_MEM_CORRUPTION
2624 /* Read and compare the downloaded code */
2625 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
2626 unsigned char *p_readback_buf
= NULL
;
2627 uint32 compared_len
;
2628 uint32 remaining_len
= 0;
2631 p_readback_buf
= MALLOC(bus
->dhd
->osh
, MEMBLOCK
);
2632 if (p_readback_buf
== NULL
) {
2633 DHD_ERROR(("%s: Failed to allocate memory %d bytes for readback buffer\n",
2634 __FUNCTION__
, MEMBLOCK
));
2635 bcmerror
= BCME_NOMEM
;
2638 /* Read image to verify downloaded contents. */
2639 offset
= bus
->dongle_ram_base
;
2641 while (compared_len
< org_fw_size
) {
/* Pre-fill with a known pattern so short reads are detectable */
2642 memset(p_readback_buf
, DHD_MEMORY_SET_PATTERN
, MEMBLOCK
);
2643 remaining_len
= org_fw_size
- compared_len
;
2645 if (remaining_len
>= MEMBLOCK
) {
2648 len
= remaining_len
;
2650 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, offset
,
2651 (uint8
*)p_readback_buf
, len
);
2653 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2654 __FUNCTION__
, bcmerror
, MEMBLOCK
, offset
));
2658 if (memcmp((p_org_fw
+ compared_len
), p_readback_buf
, len
) != 0) {
2659 DHD_ERROR(("%s: Downloaded image is corrupted. offset %d\n",
2660 __FUNCTION__
, compared_len
));
2661 bcmerror
= BCME_ERROR
;
2665 compared_len
+= len
;
2668 DHD_ERROR(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__
));
2671 if (p_readback_buf
) {
2672 MFREE(bus
->dhd
->osh
, p_readback_buf
, MEMBLOCK
);
2675 #endif /* DHD_FW_MEM_CORRUPTION */
2678 #if defined(DHD_FW_MEM_CORRUPTION)
/* Release the shadow copy with the same allocator that produced it */
2680 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
2681 DHD_OS_PREFREE(bus
->dhd
, p_org_fw
, org_fw_size
);
2683 VMFREE(bus
->dhd
->osh
, p_org_fw
, org_fw_size
);
2686 #endif /* DHD_FW_MEM_CORRUPTION */
2688 MFREE(bus
->dhd
->osh
, memblock
, MEMBLOCK
+ DHD_SDALIGN
);
2692 dhd_os_close_image1(bus
->dhd
, imgbuf
);
2696 } /* dhdpcie_download_code_file */
2698 #ifdef CUSTOMER_HW4_DEBUG
2699 #define MIN_NVRAMVARS_SIZE 128
2700 #endif /* CUSTOMER_HW4_DEBUG */
/*
 * Load nvram variables into the dongle.  Source priority: UEFI-provided
 * buffer first, then the file named by bus->nv_path, then SROM/OTP (no
 * external source needed).  File/UEFI data is run through
 * process_nvram_vars(), length-padded to a 4-byte multiple, and pushed via
 * dhdpcie_downloadvars().  The download buffer is released on exit through
 * either MFREE (locally allocated) or dhd_free_download_buffer().
 */
2703 dhdpcie_download_nvram(struct dhd_bus
*bus
)
2705 int bcmerror
= BCME_ERROR
;
2707 char * memblock
= NULL
;
2710 bool nvram_file_exists
;
2711 bool nvram_uefi_exists
= FALSE
;
2712 bool local_alloc
= FALSE
;
2713 pnv_path
= bus
->nv_path
;
2715 nvram_file_exists
= ((pnv_path
!= NULL
) && (pnv_path
[0] != '\0'));
2717 /* First try UEFI */
2718 len
= MAX_NVRAMBUF_SIZE
;
2719 dhd_get_download_buffer(bus
->dhd
, NULL
, NVRAM
, &memblock
, (int *)&len
);
2721 /* If UEFI empty, then read from file system */
2722 if ((len
<= 0) || (memblock
== NULL
)) {
2724 if (nvram_file_exists
) {
2725 len
= MAX_NVRAMBUF_SIZE
;
2726 dhd_get_download_buffer(bus
->dhd
, pnv_path
, NVRAM
, &memblock
, (int *)&len
);
2727 if ((len
<= 0 || len
> MAX_NVRAMBUF_SIZE
)) {
2732 /* For SROM OTP no external file or UEFI required */
2736 nvram_uefi_exists
= TRUE
;
2739 DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__
, len
));
2741 if (len
> 0 && len
<= MAX_NVRAMBUF_SIZE
&& memblock
!= NULL
) {
2742 bufp
= (char *) memblock
;
/* Only file/UEFI images need the raw text converted to var pairs */
2746 if (nvram_uefi_exists
|| nvram_file_exists
) {
2747 len
= process_nvram_vars(bufp
, len
);
2751 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__
, len
));
2752 #ifdef CUSTOMER_HW4_DEBUG
/* A suspiciously small var block indicates a corrupt nvram file */
2753 if (len
< MIN_NVRAMVARS_SIZE
) {
2754 DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
2756 bcmerror
= BCME_ERROR
;
2759 #endif /* CUSTOMER_HW4_DEBUG */
/* Pad length up to the next 4-byte boundary for the download */
2762 len
+= 4 - (len
% 4);
2767 bcmerror
= dhdpcie_downloadvars(bus
, memblock
, len
+ 1);
2769 DHD_ERROR(("%s: error downloading vars: %d\n",
2770 __FUNCTION__
, bcmerror
));
2777 MFREE(bus
->dhd
->osh
, memblock
, MAX_NVRAMBUF_SIZE
);
2779 dhd_free_download_buffer(bus
->dhd
, memblock
, MAX_NVRAMBUF_SIZE
);
/*
 * Read the first 'len' bytes of the firmware image (bus->fw_path) into
 * 'buf', for the RAMSIZE-adjustment scan in dhdpcie_ramsize_adj().
 * Returns a BCME_* code; the image handle is closed before returning.
 */
2787 dhdpcie_ramsize_read_image(struct dhd_bus
*bus
, char *buf
, int len
)
2789 int bcmerror
= BCME_ERROR
;
2790 char *imgbuf
= NULL
;
2792 if (buf
== NULL
|| len
== 0)
2795 /* External image takes precedence if specified */
2796 if ((bus
->fw_path
!= NULL
) && (bus
->fw_path
[0] != '\0')) {
2797 // opens and seeks to correct file offset:
2798 imgbuf
= dhd_os_open_image1(bus
->dhd
, bus
->fw_path
);
2799 if (imgbuf
== NULL
) {
2800 DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__
));
/* A short read is treated as failure: the caller needs 'len' bytes */
2805 if (len
!= dhd_os_get_image_block(buf
, len
, imgbuf
)) {
2806 DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__
, len
));
2815 dhd_os_close_image1(bus
->dhd
, imgbuf
);
2820 /* The ramsize can be changed in the dongle image, for example 4365 chip share the sysmem
2821 * with BMC and we can adjust how many sysmem belong to CA7 during dongle compilation.
2822 * So in DHD we need to detect this case and update the correct dongle RAMSIZE as well.
/*
 * Detect a RAMSIZE override embedded in the firmware image and update
 * bus->ramsize / bus->orig_ramsize accordingly (e.g. 4365 shares sysmem
 * with BMC, so the effective CA7 RAM size is decided at dongle build time).
 * The image head is scanned at each candidate offset in RAMSIZE_PTR_PTR_LIST
 * for an hnd_ramsize_ptr_t with the HND_RAMSIZE_PTR_MAGIC marker.
 * Runs at most once per bus (bus->ramsize_adjusted latches on entry), and is
 * skipped entirely when the user restricted the size via dhd_dongle_memsize.
 */
2825 dhdpcie_ramsize_adj(struct dhd_bus
*bus
)
2827 int i
, search_len
= 0;
2828 uint8
*memptr
= NULL
;
2829 uint8
*ramsizeptr
= NULL
;
2831 uint32 ramsize_ptr_ptr
[] = {RAMSIZE_PTR_PTR_LIST
};
2832 hnd_ramsize_ptr_t ramsize_info
;
2834 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2836 /* Adjust dongle RAMSIZE already called. */
2837 if (bus
->ramsize_adjusted
) {
2841 /* success or failure, we don't want to be here
2844 bus
->ramsize_adjusted
= TRUE
;
2846 /* Not handled if user-restricted dongle ram size is enabled */
2847 if (dhd_dongle_memsize
) {
2848 DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__
,
2849 dhd_dongle_memsize
));
2853 /* Out immediately if no image to download */
2854 if ((bus
->fw_path
== NULL
) || (bus
->fw_path
[0] == '\0')) {
2855 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__
));
2859 /* Get maximum RAMSIZE info search length */
2860 for (i
= 0; ; i
++) {
2861 if (ramsize_ptr_ptr
[i
] == RAMSIZE_PTR_PTR_END
)
2864 if (search_len
< (int)ramsize_ptr_ptr
[i
])
2865 search_len
= (int)ramsize_ptr_ptr
[i
];
/* Scan window must also cover the record at the largest offset */
2871 search_len
+= sizeof(hnd_ramsize_ptr_t
);
2873 memptr
= MALLOC(bus
->dhd
->osh
, search_len
);
2874 if (memptr
== NULL
) {
2875 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__
, search_len
));
2879 /* External image takes precedence if specified */
2880 if (dhdpcie_ramsize_read_image(bus
, (char *)memptr
, search_len
) != BCME_OK
) {
2884 ramsizeptr
= memptr
;
2885 ramsizelen
= search_len
;
/* Try each candidate offset until a record with a valid magic is found */
2890 for (i
= 0; ; i
++) {
2891 if (ramsize_ptr_ptr
[i
] == RAMSIZE_PTR_PTR_END
)
2894 if (ramsize_ptr_ptr
[i
] + sizeof(hnd_ramsize_ptr_t
) > ramsizelen
)
2897 memcpy((char *)&ramsize_info
, ramsizeptr
+ ramsize_ptr_ptr
[i
],
2898 sizeof(hnd_ramsize_ptr_t
));
2900 if (ramsize_info
.magic
== HTOL32(HND_RAMSIZE_PTR_MAGIC
)) {
2901 bus
->orig_ramsize
= LTOH32(ramsize_info
.ram_size
);
2902 bus
->ramsize
= LTOH32(ramsize_info
.ram_size
);
2903 DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__
,
2912 MFREE(bus
->dhd
->osh
, memptr
, search_len
);
2915 } /* dhdpcie_ramsize_adj */
2918 * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
2920 * BCMEMBEDIMAGE specific:
2921 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2922 * file will be used instead.
/*
 * Core download sequence: adjust RAM size, hold the ARM core in reset,
 * download the firmware file (bus->fw_path) and then the nvram, and finally
 * release the ARM core from reset to start the dongle.
 * BCMEMBEDIMAGE builds may fall back to the embedded image (the 'embed'
 * flag) when no file path is given or the file download fails.
 */
2926 _dhdpcie_download_firmware(struct dhd_bus
*bus
)
2930 bool embed
= FALSE
; /* download embedded firmware */
2931 bool dlok
= FALSE
; /* download firmware succeeded */
2933 /* Out immediately if no image to download */
2934 if ((bus
->fw_path
== NULL
) || (bus
->fw_path
[0] == '\0')) {
2935 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__
));
2938 /* Adjust ram size */
2939 dhdpcie_ramsize_adj(bus
);
2941 /* Keep arm in reset */
2942 if (dhdpcie_bus_download_state(bus
, TRUE
)) {
2943 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__
));
2947 /* External image takes precedence if specified */
2948 if ((bus
->fw_path
!= NULL
) && (bus
->fw_path
[0] != '\0')) {
2949 if (dhdpcie_download_code_file(bus
, bus
->fw_path
)) {
2950 DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__
,
2959 BCM_REFERENCE(embed
);
2961 DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__
, __LINE__
));
2965 /* EXAMPLE: nvram_array */
2966 /* If a valid nvram_array is specified as above, it can be passed down to dongle */
2967 /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
2969 /* External nvram takes precedence if specified */
2970 if (dhdpcie_download_nvram(bus
)) {
2971 DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__
, __LINE__
));
2975 /* Take arm out of reset */
2976 if (dhdpcie_bus_download_state(bus
, FALSE
)) {
2977 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__
));
2985 } /* _dhdpcie_download_firmware */
/*
 * Incrementally read the dongle's in-memory console ring (hnd_cons_t.log at
 * bus->console_addr) and emit complete lines to the host log via DHD_FWLOG.
 * The host-side shadow buffer c->buf is sized from the dongle's buf_size on
 * first use; c->last is the host read index, ring 'idx' the dongle write
 * index.  Wrap-around is handled with two backplane reads (tail then head).
 */
2988 dhdpcie_bus_readconsole(dhd_bus_t
*bus
)
2990 dhd_console_t
*c
= &bus
->console
;
2991 uint8 line
[CONSOLE_LINE_MAX
], ch
;
2992 uint32 n
, idx
, addr
;
2997 /* Don't do anything until FWREADY updates console address */
2998 if (bus
->console_addr
== 0)
3001 /* Read console log struct */
3002 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, log
);
3004 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)&c
->log
, sizeof(c
->log
))) < 0)
3007 /* Allocate console buffer (one time only) */
3008 if (c
->buf
== NULL
) {
3009 c
->bufsize
= ltoh32(c
->log
.buf_size
);
3010 if ((c
->buf
= MALLOC(bus
->dhd
->osh
, c
->bufsize
)) == NULL
)
3012 DHD_INFO(("conlog: bufsize=0x%x\n", c
->bufsize
));
3014 idx
= ltoh32(c
->log
.idx
);
3016 /* Protect against corrupt value */
3017 if (idx
> c
->bufsize
)
3020 /* Skip reading the console buffer if the index pointer has not moved */
3024 DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c
->log
.buf
,
3027 /* Read the console buffer data to a local buffer */
3028 /* optimize and read only the portion of the buffer needed, but
3029 * important to handle wrap-around.
3031 addr
= ltoh32(c
->log
.buf
);
3033 /* wrap around case - write ptr < read ptr */
3034 if (idx
< c
->last
) {
3035 /* from read ptr to end of buffer */
3036 readlen
= c
->bufsize
- c
->last
;
3037 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3038 addr
+ c
->last
, c
->buf
, readlen
)) < 0) {
3039 DHD_ERROR(("conlog: read error[1] ! \n"));
3042 /* from beginning of buffer to write ptr */
3043 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3044 addr
, c
->buf
+ readlen
,
3046 DHD_ERROR(("conlog: read error[2] ! \n"));
3051 /* non-wraparound case, write ptr > read ptr */
3052 readlen
= (uint
)idx
- c
->last
;
3053 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3054 addr
+ c
->last
, c
->buf
, readlen
)) < 0) {
3055 DHD_ERROR(("conlog: read error[3] ! \n"));
3059 /* update read ptr */
3062 /* now output the read data from the local buffer to the host console */
3063 while (i
< readlen
) {
3064 for (n
= 0; n
< CONSOLE_LINE_MAX
- 2 && i
< readlen
; n
++) {
/* strip a trailing carriage return before logging the line */
3073 if (line
[n
- 1] == '\r')
3076 DHD_FWLOG(("CONSOLE: %s\n", line
));
3082 } /* dhdpcie_bus_readconsole */
/*
 * Dump the ENTIRE dongle console ring to the host log (used on error paths,
 * unlike dhdpcie_bus_readconsole() which reads incrementally).  Reads the
 * ring pointer, size and write index from the shared hnd_cons_t, copies the
 * whole ring into a temporary host buffer, then replays it line by line
 * starting at the write index (oldest data first).  Skipped when the PCIe
 * link is down; the temporary buffer is freed before returning.
 */
3085 dhd_bus_dump_console_buffer(dhd_bus_t
*bus
)
3089 char *console_buffer
= NULL
;
3090 uint32 console_ptr
, console_size
, console_index
;
3091 uint8 line
[CONSOLE_LINE_MAX
], ch
;
3094 DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__
));
3096 if (bus
->is_linkdown
) {
3097 DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__
));
3101 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
);
3102 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3103 (uint8
*)&console_ptr
, sizeof(console_ptr
))) < 0) {
3107 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
.buf_size
);
3108 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3109 (uint8
*)&console_size
, sizeof(console_size
))) < 0) {
3113 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
.idx
);
3114 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3115 (uint8
*)&console_index
, sizeof(console_index
))) < 0) {
/* Shared-memory fields are little-endian; convert for the host */
3119 console_ptr
= ltoh32(console_ptr
);
3120 console_size
= ltoh32(console_size
);
3121 console_index
= ltoh32(console_index
);
/* Sanity-bound the size before allocating the host-side copy */
3123 if (console_size
> CONSOLE_BUFFER_MAX
||
3124 !(console_buffer
= MALLOC(bus
->dhd
->osh
, console_size
))) {
3128 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, console_ptr
,
3129 (uint8
*)console_buffer
, console_size
)) < 0) {
/* Walk the ring starting at the write index, modulo the ring size */
3133 for (i
= 0, n
= 0; i
< console_size
; i
+= n
+ 1) {
3134 for (n
= 0; n
< CONSOLE_LINE_MAX
- 2; n
++) {
3135 ch
= console_buffer
[(console_index
+ i
+ n
) % console_size
];
3142 if (line
[n
- 1] == '\r')
3145 /* Don't use DHD_ERROR macro since we print
3146 * a lot of information quickly. The macro
3147 * will truncate a lot of the printfs
3150 DHD_FWLOG(("CONSOLE: %s\n", line
));
3156 MFREE(bus
->dhd
->osh
, console_buffer
, console_size
);
3161 * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file.
3163 * @return BCME_OK on success
/*
 * Diagnose a (suspected) dead dongle: read the shared area, format any
 * assert expression/file/line and trap record into 'data' (allocating a
 * scratch buffer when called with data == NULL after an rx ctrl timeout),
 * replay the dongle console, and - if a trap occurred - schedule memdump /
 * SSSR dump collection and a reset.  Busy-state flags bracket the whole
 * operation, and a backplane power request is held when MULTIBP is enabled.
 * Returns a BCME_* code; mbuffer/str scratch allocations are freed on exit.
 */
3166 dhdpcie_checkdied(dhd_bus_t
*bus
, char *data
, uint size
)
3170 char *mbuffer
= NULL
;
3171 uint maxstrlen
= 256;
3173 pciedev_shared_t
*local_pciedev_shared
= bus
->pcie_sh
;
3174 struct bcmstrbuf strbuf
;
3175 unsigned long flags
;
3176 bool dongle_trap_occured
= FALSE
;
3178 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3180 if (DHD_NOCHECKDIED_ON()) {
3186 * Called after a rx ctrl timeout. "data" is NULL.
3187 * allocate memory to trace the trap or assert.
3190 mbuffer
= data
= MALLOC(bus
->dhd
->osh
, msize
);
3192 if (mbuffer
== NULL
) {
3193 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__
, msize
));
3194 bcmerror
= BCME_NOMEM
;
3199 if ((str
= MALLOC(bus
->dhd
->osh
, maxstrlen
)) == NULL
) {
3200 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__
, maxstrlen
));
3201 bcmerror
= BCME_NOMEM
;
3204 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
3205 DHD_BUS_BUSY_SET_IN_CHECKDIED(bus
->dhd
);
3206 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
3208 if (MULTIBP_ENAB(bus
->sih
)) {
3209 dhd_bus_pcie_pwr_req(bus
);
3211 if ((bcmerror
= dhdpcie_readshared(bus
)) < 0) {
3215 bcm_binit(&strbuf
, data
, size
);
3217 bcm_bprintf(&strbuf
, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
3218 local_pciedev_shared
->msgtrace_addr
, local_pciedev_shared
->console_addr
);
3220 if ((local_pciedev_shared
->flags
& PCIE_SHARED_ASSERT_BUILT
) == 0) {
3221 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3222 * (Avoids conflict with real asserts for programmatic parsing of output.)
3224 bcm_bprintf(&strbuf
, "Assrt not built in dongle\n");
3227 if ((bus
->pcie_sh
->flags
& (PCIE_SHARED_ASSERT
|PCIE_SHARED_TRAP
)) == 0) {
3228 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3229 * (Avoids conflict with real asserts for programmatic parsing of output.)
3231 bcm_bprintf(&strbuf
, "No trap%s in dongle",
3232 (bus
->pcie_sh
->flags
& PCIE_SHARED_ASSERT_BUILT
)
3235 if (bus
->pcie_sh
->flags
& PCIE_SHARED_ASSERT
) {
3236 /* Download assert */
3237 bcm_bprintf(&strbuf
, "Dongle assert");
/* Pull the assert expression string from dongle memory */
3238 if (bus
->pcie_sh
->assert_exp_addr
!= 0) {
3240 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3241 bus
->pcie_sh
->assert_exp_addr
,
3242 (uint8
*)str
, maxstrlen
)) < 0) {
3246 str
[maxstrlen
- 1] = '\0';
3247 bcm_bprintf(&strbuf
, " expr \"%s\"", str
);
/* Pull the assert source-file name from dongle memory */
3250 if (bus
->pcie_sh
->assert_file_addr
!= 0) {
3252 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3253 bus
->pcie_sh
->assert_file_addr
,
3254 (uint8
*)str
, maxstrlen
)) < 0) {
3258 str
[maxstrlen
- 1] = '\0';
3259 bcm_bprintf(&strbuf
, " file \"%s\"", str
);
3262 bcm_bprintf(&strbuf
, " line %d ", bus
->pcie_sh
->assert_line
);
3265 if (bus
->pcie_sh
->flags
& PCIE_SHARED_TRAP
) {
3266 trap_t
*tr
= &bus
->dhd
->last_trap_info
;
3267 dongle_trap_occured
= TRUE
;
3268 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3269 bus
->pcie_sh
->trap_addr
, (uint8
*)tr
, sizeof(trap_t
))) < 0) {
3270 bus
->dhd
->dongle_trap_occured
= TRUE
;
3273 dhd_bus_dump_trap_info(bus
, &strbuf
);
3277 if (bus
->pcie_sh
->flags
& (PCIE_SHARED_ASSERT
| PCIE_SHARED_TRAP
)) {
3278 DHD_FWLOG(("%s: %s\n", __FUNCTION__
, strbuf
.origbuf
));
3280 /* wake up IOCTL wait event */
3281 dhd_wakeup_ioctl_event(bus
->dhd
, IOCTL_RETURN_ON_TRAP
);
3283 dhd_bus_dump_console_buffer(bus
);
3284 dhd_prot_debug_info_print(bus
->dhd
);
3286 #if defined(DHD_FW_COREDUMP)
3287 /* save core dump or write to a file */
3288 if (bus
->dhd
->memdump_enabled
) {
3289 #ifdef DHD_SSSR_DUMP
3290 if (bus
->dhd
->sssr_inited
) {
3291 dhdpcie_sssr_dump(bus
->dhd
);
3293 #endif /* DHD_SSSR_DUMP */
3294 bus
->dhd
->memdump_type
= DUMP_TYPE_DONGLE_TRAP
;
3295 dhdpcie_mem_dump(bus
);
3297 #endif /* DHD_FW_COREDUMP */
3299 /* set the trap occured flag only after all the memdump,
3300 * logdump and sssr dump collection has been scheduled
3302 if (dongle_trap_occured
) {
3303 bus
->dhd
->dongle_trap_occured
= TRUE
;
3306 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3307 copy_hang_info_trap(bus
->dhd
);
3308 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3309 dhd_schedule_reset(bus
->dhd
);
3313 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
3314 DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus
->dhd
);
3315 dhd_os_busbusy_wake(bus
->dhd
);
3316 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
3319 if (MULTIBP_ENAB(bus
->sih
)) {
3320 dhd_bus_pcie_pwr_req_clear(bus
);
3323 MFREE(bus
->dhd
->osh
, mbuffer
, msize
);
3325 MFREE(bus
->dhd
->osh
, str
, maxstrlen
);
3328 } /* dhdpcie_checkdied */
3330 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
/*
 * Interrupt-level variant of dhdpcie_mem_dump(): copy the whole dongle RAM
 * into the caller-supplied 'buf' in MEMBLOCK-sized reads and publish it via
 * bus->dhd->soc_ram / soc_ram_length.  A probe read of the first word
 * detects a dead bus (read error or all-ones) before the full dump starts.
 * No locking, scheduling, or allocation is done here, so it is safe to call
 * from interrupt context (caller owns 'buf').
 */
3331 void dhdpcie_mem_dump_bugcheck(dhd_bus_t
*bus
, uint8
*buf
)
3334 int size
; /* Full mem size */
3335 int start
; /* Start address */
3336 int read_size
= 0; /* Read size of each iteration */
3337 uint8
*databuf
= buf
;
3343 start
= bus
->dongle_ram_base
;
3345 /* check for dead bus */
3348 ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, (uint8
*)&test_word
, read_size
);
3349 /* if read error or bus timeout */
3350 if (ret
|| (test_word
== 0xFFFFFFFF)) {
3355 /* Get full mem size */
3356 size
= bus
->ramsize
;
3357 /* Read mem content */
3360 read_size
= MIN(MEMBLOCK
, size
);
3361 if ((ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, databuf
, read_size
))) {
3365 /* Decrement size and increment start address */
3368 databuf
+= read_size
;
3370 bus
->dhd
->soc_ram
= buf
;
3371 bus
->dhd
->soc_ram_length
= bus
->ramsize
;
3375 #if defined(DHD_FW_COREDUMP)
/*
 * Dump the full dongle RAM into the driver's fwdump buffer and schedule the
 * host-side memdump work (dhd_schedule_memdump).  Skipped when the PCIe
 * link is down; bracketed by runtime-PM get/put when native runtime PM is
 * enabled.  RAM is read in MEMBLOCK-sized chunks; memdump_success tracks
 * the outcome for the debug-UART path.  The dump buffer itself is freed
 * later by dhd_{free,clear} (see comment at the schedule call).
 */
3377 dhdpcie_mem_dump(dhd_bus_t
*bus
)
3380 int size
; /* Full mem size */
3381 int start
= bus
->dongle_ram_base
; /* Start address */
3382 int read_size
= 0; /* Read size of each iteration */
3383 uint8
*buf
= NULL
, *databuf
= NULL
;
3385 #ifdef EXYNOS_PCIE_DEBUG
3386 exynos_pcie_register_dump(1);
3387 #endif /* EXYNOS_PCIE_DEBUG */
3389 #ifdef SUPPORT_LINKDOWN_RECOVERY
3390 if (bus
->is_linkdown
) {
3391 DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__
));
3394 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3396 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3397 if (pm_runtime_get_sync(dhd_bus_to_dev(bus
)) < 0)
3399 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3401 /* Get full mem size */
3402 size
= bus
->ramsize
;
3403 buf
= dhd_get_fwdump_buf(bus
->dhd
, size
);
3405 DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__
, size
));
3409 /* Read mem content */
3410 DHD_TRACE_HW4(("Dump dongle memory\n"));
3414 read_size
= MIN(MEMBLOCK
, size
);
3415 if ((ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, databuf
, read_size
)))
3417 DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__
, ret
));
3418 #ifdef DHD_DEBUG_UART
3419 bus
->dhd
->memdump_success
= FALSE
;
3420 #endif /* DHD_DEBUG_UART */
3425 /* Decrement size and increment start address */
3428 databuf
+= read_size
;
3430 #ifdef DHD_DEBUG_UART
3431 bus
->dhd
->memdump_success
= TRUE
;
3432 #endif /* DHD_DEBUG_UART */
3434 dhd_schedule_memdump(bus
->dhd
, buf
, bus
->ramsize
);
3435 /* buf, actually soc_ram free handled in dhd_{free,clear} */
3437 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3438 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
3439 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus
));
3440 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
/*
 * Public wrapper around dhdpcie_mem_dump(): refuses to dump when the bus is
 * down or in (or entering) suspend, and holds an OS wake lock for the
 * duration of the dump.  Returns the dump result (BCME_* code).
 */
3446 dhd_bus_mem_dump(dhd_pub_t
*dhdp
)
3448 dhd_bus_t
*bus
= dhdp
->bus
;
3449 int ret
= BCME_ERROR
;
3451 if (dhdp
->busstate
== DHD_BUS_DOWN
) {
3452 DHD_ERROR(("%s bus is down\n", __FUNCTION__
));
3456 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp
)) {
3457 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
3458 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
3462 DHD_OS_WAKE_LOCK(dhdp
);
3463 ret
= dhdpcie_mem_dump(bus
);
3464 DHD_OS_WAKE_UNLOCK(dhdp
);
3467 #endif /* DHD_FW_COREDUMP */
3470 dhd_socram_dump(dhd_bus_t
*bus
)
3472 #if defined(DHD_FW_COREDUMP)
3473 DHD_OS_WAKE_LOCK(bus
->dhd
);
3474 dhd_bus_mem_dump(bus
->dhd
);
3475 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
3483 * Transfers bytes from host to dongle using pio mode.
3484 * Parameter 'address' is a backplane address.
3487 dhdpcie_bus_membytes(dhd_bus_t
*bus
, bool write
, ulong address
, uint8
*data
, uint size
)
3490 int detect_endian_flag
= 0x01;
3493 if (write
&& bus
->is_linkdown
) {
3494 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
3498 if (MULTIBP_ENAB(bus
->sih
)) {
3499 dhd_bus_pcie_pwr_req(bus
);
3501 /* Detect endianness. */
3502 little_endian
= *(char *)&detect_endian_flag
;
3504 /* In remap mode, adjust address beyond socram and redirect
3505 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
3506 * is not backplane accessible
3509 /* Determine initial transfer parameters */
3510 #ifdef DHD_SUPPORT_64BIT
3511 dsize
= sizeof(uint64
);
3512 #else /* !DHD_SUPPORT_64BIT */
3513 dsize
= sizeof(uint32
);
3514 #endif /* DHD_SUPPORT_64BIT */
3516 /* Do the transfer(s) */
3519 #ifdef DHD_SUPPORT_64BIT
3520 if (size
>= sizeof(uint64
) && little_endian
&& !(address
% 8)) {
3521 dhdpcie_bus_wtcm64(bus
, address
, *((uint64
*)data
));
3523 #else /* !DHD_SUPPORT_64BIT */
3524 if (size
>= sizeof(uint32
) && little_endian
&& !(address
% 4)) {
3525 dhdpcie_bus_wtcm32(bus
, address
, *((uint32
*)data
));
3527 #endif /* DHD_SUPPORT_64BIT */
3529 dsize
= sizeof(uint8
);
3530 dhdpcie_bus_wtcm8(bus
, address
, *data
);
3533 /* Adjust for next transfer (if any) */
3534 if ((size
-= dsize
)) {
3541 #ifdef DHD_SUPPORT_64BIT
3542 if (size
>= sizeof(uint64
) && little_endian
&& !(address
% 8))
3544 *(uint64
*)data
= dhdpcie_bus_rtcm64(bus
, address
);
3546 #else /* !DHD_SUPPORT_64BIT */
3547 if (size
>= sizeof(uint32
) && little_endian
&& !(address
% 4))
3549 *(uint32
*)data
= dhdpcie_bus_rtcm32(bus
, address
);
3551 #endif /* DHD_SUPPORT_64BIT */
3553 dsize
= sizeof(uint8
);
3554 *data
= dhdpcie_bus_rtcm8(bus
, address
);
3557 /* Adjust for next transfer (if any) */
3558 if ((size
-= dsize
) > 0) {
3564 if (MULTIBP_ENAB(bus
->sih
)) {
3565 dhd_bus_pcie_pwr_req_clear(bus
);
3568 } /* dhdpcie_bus_membytes */
3571 * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
3572 * to the (non flow controlled) flow ring.
3575 dhd_bus_schedule_queue(struct dhd_bus
*bus
, uint16 flow_id
, bool txs
)
3577 flow_ring_node_t
*flow_ring_node
;
3579 #ifdef DHD_LOSSLESS_ROAMING
3580 dhd_pub_t
*dhdp
= bus
->dhd
;
3582 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__
, flow_id
));
3584 /* ASSERT on flow_id */
3585 if (flow_id
>= bus
->max_submission_rings
) {
3586 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__
,
3587 flow_id
, bus
->max_submission_rings
));
3591 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flow_id
);
3593 if (flow_ring_node
->prot_info
== NULL
) {
3594 DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__
));
3595 return BCME_NOTREADY
;
3598 #ifdef DHD_LOSSLESS_ROAMING
3599 if ((dhdp
->dequeue_prec_map
& (1 << flow_ring_node
->flow_info
.tid
)) == 0) {
3600 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
3601 __FUNCTION__
, flow_ring_node
->flow_info
.tid
));
3604 #endif /* DHD_LOSSLESS_ROAMING */
3607 unsigned long flags
;
3609 flow_queue_t
*queue
;
3610 #ifdef DHD_LOSSLESS_ROAMING
3611 struct ether_header
*eh
;
3613 #endif /* DHD_LOSSLESS_ROAMING */
3615 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
3617 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
3619 if (flow_ring_node
->status
!= FLOW_RING_STATUS_OPEN
) {
3620 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3621 return BCME_NOTREADY
;
3624 while ((txp
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
3628 * Modifying the packet length caused P2P cert failures.
3629 * Specifically on test cases where a packet of size 52 bytes
3630 * was injected, the sniffer capture showed 62 bytes because of
3631 * which the cert tests failed. So making the below change
3632 * only Router specific.
3635 #ifdef DHDTCPACK_SUPPRESS
3636 if (bus
->dhd
->tcpack_sup_mode
!= TCPACK_SUP_HOLD
) {
3637 ret
= dhd_tcpack_check_xmit(bus
->dhd
, txp
);
3638 if (ret
!= BCME_OK
) {
3639 DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
3643 #endif /* DHDTCPACK_SUPPRESS */
3644 #ifdef DHD_LOSSLESS_ROAMING
3645 pktdata
= (uint8
*)PKTDATA(OSH_NULL
, txp
);
3646 eh
= (struct ether_header
*) pktdata
;
3647 if (eh
->ether_type
== hton16(ETHER_TYPE_802_1X
)) {
3648 uint8 prio
= (uint8
)PKTPRIO(txp
);
3649 /* Restore to original priority for 802.1X packet */
3650 if (prio
== PRIO_8021D_NC
) {
3651 PKTSETPRIO(txp
, dhdp
->prio_8021x
);
3654 #endif /* DHD_LOSSLESS_ROAMING */
3655 /* Attempt to transfer packet over flow ring */
3656 ret
= dhd_prot_txdata(bus
->dhd
, txp
, flow_ring_node
->flow_info
.ifindex
);
3657 if (ret
!= BCME_OK
) { /* may not have resources in flow ring */
3658 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__
, ret
));
3659 dhd_prot_txdata_write_flush(bus
->dhd
, flow_id
);
3660 /* reinsert at head */
3661 dhd_flow_queue_reinsert(bus
->dhd
, queue
, txp
);
3662 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3664 /* If we are able to requeue back, return success */
3669 dhd_prot_txdata_write_flush(bus
->dhd
, flow_id
);
3671 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3675 } /* dhd_bus_schedule_queue */
3677 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
3679 dhd_bus_txdata(struct dhd_bus
*bus
, void *txp
, uint8 ifidx
)
3682 #ifdef IDLE_TX_FLOW_MGMT
3684 #endif /* IDLE_TX_FLOW_MGMT */
3685 flow_queue_t
*queue
;
3686 flow_ring_node_t
*flow_ring_node
;
3687 unsigned long flags
;
3689 void *txp_pend
= NULL
;
3691 if (!bus
->dhd
->flowid_allocator
) {
3692 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__
));
3696 flowid
= DHD_PKT_GET_FLOWID(txp
);
3698 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
3700 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
3701 __FUNCTION__
, flowid
, flow_ring_node
->status
, flow_ring_node
->active
));
3703 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
3704 if ((flowid
>= bus
->dhd
->num_flow_rings
) ||
3705 #ifdef IDLE_TX_FLOW_MGMT
3706 (!flow_ring_node
->active
))
3708 (!flow_ring_node
->active
) ||
3709 (flow_ring_node
->status
== FLOW_RING_STATUS_DELETE_PENDING
) ||
3710 (flow_ring_node
->status
== FLOW_RING_STATUS_STA_FREEING
))
3711 #endif /* IDLE_TX_FLOW_MGMT */
3713 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3714 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
3715 __FUNCTION__
, flowid
, flow_ring_node
->status
,
3716 flow_ring_node
->active
));
3721 #ifdef IDLE_TX_FLOW_MGMT
3722 node_status
= flow_ring_node
->status
;
3724 /* handle diffrent status states here!! */
3725 switch (node_status
)
3727 case FLOW_RING_STATUS_OPEN
:
3729 if (bus
->enable_idle_flowring_mgmt
) {
3730 /* Move the node to the head of active list */
3731 dhd_flow_ring_move_to_active_list_head(bus
, flow_ring_node
);
3735 case FLOW_RING_STATUS_SUSPENDED
:
3736 DHD_INFO(("Need to Initiate TX Flow resume\n"));
3737 /* Issue resume_ring request */
3738 dhd_bus_flow_ring_resume_request(bus
,
3742 case FLOW_RING_STATUS_CREATE_PENDING
:
3743 case FLOW_RING_STATUS_RESUME_PENDING
:
3744 /* Dont do anything here!! */
3745 DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
3749 case FLOW_RING_STATUS_DELETE_PENDING
:
3751 DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
3752 flowid
, node_status
));
3755 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3758 /* Now queue the packet */
3759 #endif /* IDLE_TX_FLOW_MGMT */
3761 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
3763 if ((ret
= dhd_flow_queue_enqueue(bus
->dhd
, queue
, txp
)) != BCME_OK
)
3766 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3768 if (flow_ring_node
->status
) {
3769 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
3770 __FUNCTION__
, flowid
, flow_ring_node
->status
,
3771 flow_ring_node
->active
));
3778 ret
= dhd_bus_schedule_queue(bus
, flowid
, FALSE
); /* from queue to flowring */
3780 /* If we have anything pending, try to push into q */
3782 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
3784 if ((ret
= dhd_flow_queue_enqueue(bus
->dhd
, queue
, txp_pend
)) != BCME_OK
) {
3785 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3790 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
3796 DHD_INFO(("%s: Toss %d\n", __FUNCTION__
, ret
));
3797 PKTCFREE(bus
->dhd
->osh
, txp
, TRUE
);
3799 } /* dhd_bus_txdata */
3802 dhd_bus_stop_queue(struct dhd_bus
*bus
)
3804 dhd_txflowcontrol(bus
->dhd
, ALL_INTERFACES
, ON
);
3808 dhd_bus_start_queue(struct dhd_bus
*bus
)
3810 dhd_txflowcontrol(bus
->dhd
, ALL_INTERFACES
, OFF
);
3813 /* Device console input function */
3814 int dhd_bus_console_in(dhd_pub_t
*dhd
, uchar
*msg
, uint msglen
)
3816 dhd_bus_t
*bus
= dhd
->bus
;
3819 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
3820 if (bus
->console_addr
== 0)
3821 return BCME_UNSUPPORTED
;
3823 /* Don't allow input if dongle is in reset */
3824 if (bus
->dhd
->dongle_reset
) {
3825 return BCME_NOTREADY
;
3828 /* Zero cbuf_index */
3829 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, cbuf_idx
);
3831 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
))) < 0)
3834 /* Write message into cbuf */
3835 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, cbuf
);
3836 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)msg
, msglen
)) < 0)
3839 /* Write length into vcons_in */
3840 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, vcons_in
);
3841 val
= htol32(msglen
);
3842 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
))) < 0)
3845 /* generate an interrupt to dongle to indicate that it needs to process cons command */
3846 dhdpcie_send_mb_data(bus
, H2D_HOST_CONS_INT
);
3849 } /* dhd_bus_console_in */
3852 * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
3853 * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
3856 dhd_bus_rx_frame(struct dhd_bus
*bus
, void* pkt
, int ifidx
, uint pkt_count
)
3858 dhd_rx_frame(bus
->dhd
, ifidx
, pkt
, pkt_count
, 0);
3861 /** 'offset' is a backplane address */
3863 dhdpcie_bus_wtcm8(dhd_bus_t
*bus
, ulong offset
, uint8 data
)
3865 if (bus
->is_linkdown
) {
3866 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3869 W_REG(bus
->dhd
->osh
, (volatile uint8
*)(bus
->tcm
+ offset
), data
);
3874 dhdpcie_bus_rtcm8(dhd_bus_t
*bus
, ulong offset
)
3876 volatile uint8 data
;
3877 if (bus
->is_linkdown
) {
3878 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3881 data
= R_REG(bus
->dhd
->osh
, (volatile uint8
*)(bus
->tcm
+ offset
));
3887 dhdpcie_bus_wtcm32(dhd_bus_t
*bus
, ulong offset
, uint32 data
)
3889 if (bus
->is_linkdown
) {
3890 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3893 W_REG(bus
->dhd
->osh
, (volatile uint32
*)(bus
->tcm
+ offset
), data
);
3897 dhdpcie_bus_wtcm16(dhd_bus_t
*bus
, ulong offset
, uint16 data
)
3899 if (bus
->is_linkdown
) {
3900 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3903 W_REG(bus
->dhd
->osh
, (volatile uint16
*)(bus
->tcm
+ offset
), data
);
#ifdef DHD_SUPPORT_64BIT
/** Writes a 64-bit doubleword to dongle TCM; dropped when the link is down. */
static void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}
	W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
}
#endif /* DHD_SUPPORT_64BIT */
3920 dhdpcie_bus_rtcm16(dhd_bus_t
*bus
, ulong offset
)
3922 volatile uint16 data
;
3923 if (bus
->is_linkdown
) {
3924 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3927 data
= R_REG(bus
->dhd
->osh
, (volatile uint16
*)(bus
->tcm
+ offset
));
3933 dhdpcie_bus_rtcm32(dhd_bus_t
*bus
, ulong offset
)
3935 volatile uint32 data
;
3936 if (bus
->is_linkdown
) {
3937 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
3940 data
= R_REG(bus
->dhd
->osh
, (volatile uint32
*)(bus
->tcm
+ offset
));
#ifdef DHD_SUPPORT_64BIT
/** Reads a 64-bit doubleword from dongle TCM; returns all-ones when the link is down. */
static uint64
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
	volatile uint64 data;
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint64)-1;
	} else {
		data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
	}
	return data;
}
#endif /* DHD_SUPPORT_64BIT */
3960 /** A snippet of dongle memory is shared between host and dongle */
3962 dhd_bus_cmn_writeshared(dhd_bus_t
*bus
, void *data
, uint32 len
, uint8 type
, uint16 ringid
)
3965 ulong addr
; /* dongle address */
3967 DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__
, type
, len
));
3969 if (bus
->is_linkdown
) {
3970 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
3974 if (MULTIBP_ENAB(bus
->sih
)) {
3975 dhd_bus_pcie_pwr_req(bus
);
3978 case D2H_DMA_SCRATCH_BUF
:
3979 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_dma_scratch_buffer
);
3980 long_data
= HTOL64(*(uint64
*)data
);
3981 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
3982 if (dhd_msg_level
& DHD_INFO_VAL
) {
3983 prhex(__FUNCTION__
, data
, len
);
3987 case D2H_DMA_SCRATCH_BUF_LEN
:
3988 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_dma_scratch_buffer_len
);
3989 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
3990 if (dhd_msg_level
& DHD_INFO_VAL
) {
3991 prhex(__FUNCTION__
, data
, len
);
3995 case H2D_DMA_INDX_WR_BUF
:
3996 long_data
= HTOL64(*(uint64
*)data
);
3997 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, h2d_w_idx_hostaddr
);
3998 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
3999 if (dhd_msg_level
& DHD_INFO_VAL
) {
4000 prhex(__FUNCTION__
, data
, len
);
4004 case H2D_DMA_INDX_RD_BUF
:
4005 long_data
= HTOL64(*(uint64
*)data
);
4006 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, h2d_r_idx_hostaddr
);
4007 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4008 if (dhd_msg_level
& DHD_INFO_VAL
) {
4009 prhex(__FUNCTION__
, data
, len
);
4013 case D2H_DMA_INDX_WR_BUF
:
4014 long_data
= HTOL64(*(uint64
*)data
);
4015 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, d2h_w_idx_hostaddr
);
4016 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4017 if (dhd_msg_level
& DHD_INFO_VAL
) {
4018 prhex(__FUNCTION__
, data
, len
);
4022 case D2H_DMA_INDX_RD_BUF
:
4023 long_data
= HTOL64(*(uint64
*)data
);
4024 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, d2h_r_idx_hostaddr
);
4025 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4026 if (dhd_msg_level
& DHD_INFO_VAL
) {
4027 prhex(__FUNCTION__
, data
, len
);
4031 case H2D_IFRM_INDX_WR_BUF
:
4032 long_data
= HTOL64(*(uint64
*)data
);
4033 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, ifrm_w_idx_hostaddr
);
4034 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4035 if (dhd_msg_level
& DHD_INFO_VAL
) {
4036 prhex(__FUNCTION__
, data
, len
);
4040 case RING_ITEM_LEN
:
4041 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, len_items
);
4042 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4045 case RING_MAX_ITEMS
:
4046 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, max_item
);
4047 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4050 case RING_BUF_ADDR
:
4051 long_data
= HTOL64(*(uint64
*)data
);
4052 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, base_addr
);
4053 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4054 if (dhd_msg_level
& DHD_INFO_VAL
) {
4055 prhex(__FUNCTION__
, data
, len
);
4060 addr
= bus
->ring_sh
[ringid
].ring_state_w
;
4061 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4065 addr
= bus
->ring_sh
[ringid
].ring_state_r
;
4066 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4070 addr
= bus
->d2h_mb_data_ptr_addr
;
4071 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4075 addr
= bus
->h2d_mb_data_ptr_addr
;
4076 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4079 case HOST_API_VERSION
:
4080 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_cap
);
4081 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4084 case DNGL_TO_HOST_TRAP_ADDR
:
4085 long_data
= HTOL64(*(uint64
*)data
);
4086 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_trap_addr
);
4087 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4088 DHD_INFO(("Wrote trap addr:0x%x\n", (uint32
) HTOL32(*(uint32
*)data
)));
4092 case DNGL_TO_HOST_TRAP_ADDR_LEN
:
4093 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, device_trap_debug_buffer_len
);
4094 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4096 #endif /* D2H_MINIDUMP */
4101 if (MULTIBP_ENAB(bus
->sih
)) {
4102 dhd_bus_pcie_pwr_req_clear(bus
);
4104 } /* dhd_bus_cmn_writeshared */
4106 /** A snippet of dongle memory is shared between host and dongle */
4108 dhd_bus_cmn_readshared(dhd_bus_t
*bus
, void* data
, uint8 type
, uint16 ringid
)
4110 ulong addr
; /* dongle address */
4112 if (MULTIBP_ENAB(bus
->sih
)) {
4113 dhd_bus_pcie_pwr_req(bus
);
4117 addr
= bus
->ring_sh
[ringid
].ring_state_w
;
4118 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4122 addr
= bus
->ring_sh
[ringid
].ring_state_r
;
4123 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4126 case TOTAL_LFRAG_PACKET_CNT
:
4127 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, total_lfrag_pkt_cnt
);
4128 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4132 addr
= bus
->h2d_mb_data_ptr_addr
;
4133 *(uint32
*)data
= LTOH32(dhdpcie_bus_rtcm32(bus
, addr
));
4137 addr
= bus
->d2h_mb_data_ptr_addr
;
4138 *(uint32
*)data
= LTOH32(dhdpcie_bus_rtcm32(bus
, addr
));
4141 case MAX_HOST_RXBUFS
:
4142 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, max_host_rxbufs
);
4143 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
4149 if (MULTIBP_ENAB(bus
->sih
)) {
4150 dhd_bus_pcie_pwr_req_clear(bus
);
4154 uint32
dhd_bus_get_sharedflags(dhd_bus_t
*bus
)
4156 return ((pciedev_shared_t
*)bus
->pcie_sh
)->flags
;
4160 dhd_bus_clearcounts(dhd_pub_t
*dhdp
)
4165 * @param params input buffer, NULL for 'set' operation.
4166 * @param plen length of 'params' buffer, 0 for 'set' operation.
4167 * @param arg output buffer
4170 dhd_bus_iovar_op(dhd_pub_t
*dhdp
, const char *name
,
4171 void *params
, int plen
, void *arg
, int len
, bool set
)
4173 dhd_bus_t
*bus
= dhdp
->bus
;
4174 const bcm_iovar_t
*vi
= NULL
;
4175 int bcmerror
= BCME_UNSUPPORTED
;
4179 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
4183 if (!name
|| len
< 0)
4186 /* Get MUST have return space */
4187 ASSERT(set
|| (arg
&& len
));
4188 if (!(set
|| (arg
&& len
)))
4191 /* Set does NOT take qualifiers */
4192 ASSERT(!set
|| (!params
&& !plen
));
4193 if (!(!set
|| (!params
&& !plen
)))
4196 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__
,
4197 name
, (set
? "set" : "get"), len
, plen
));
4199 if (MULTIBP_ENAB(bus
->sih
)) {
4200 dhd_bus_pcie_pwr_req(bus
);
4203 /* Look up var locally; if not found pass to host driver */
4204 if ((vi
= bcm_iovar_lookup(dhdpcie_iovars
, name
)) == NULL
) {
4208 /* set up 'params' pointer in case this is a set command so that
4209 * the convenience int and bool code can be common to set and get
4211 if (params
== NULL
) {
4216 if (vi
->type
== IOVT_VOID
)
4218 else if (vi
->type
== IOVT_BUFFER
)
4221 /* all other types are integer sized */
4222 val_size
= sizeof(int);
4224 actionid
= set
? IOV_SVAL(vi
->varid
) : IOV_GVAL(vi
->varid
);
4225 bcmerror
= dhdpcie_bus_doiovar(bus
, vi
, actionid
, name
, params
, plen
, arg
, len
, val_size
);
4228 /* In DEVRESET_QUIESCE/DEVRESET_ON,
4229 * this includes dongle re-attach which initialize pwr_req_ref count to 0 and
4230 * causes pwr_req_ref count miss-match in pwr req clear function and hang.
4231 * In this case, bypass pwr req clear.
4233 if (bcmerror
== BCME_DNGL_DEVRESET
) {
4236 if (MULTIBP_ENAB(bus
->sih
)) {
4237 dhd_bus_pcie_pwr_req_clear(bus
);
4241 } /* dhd_bus_iovar_op */
4244 #include <bcm_buzzz.h>
4247 dhd_buzzz_dump_cntrs(char *p
, uint32
*core
, uint32
*log
,
4248 const int num_counters
)
4252 uint32 curr
[BCM_BUZZZ_COUNTERS_MAX
], prev
[BCM_BUZZZ_COUNTERS_MAX
];
4253 uint32 delta
[BCM_BUZZZ_COUNTERS_MAX
];
4255 /* Compute elapsed counter values per counter event type */
4256 for (ctr
= 0U; ctr
< num_counters
; ctr
++) {
4257 prev
[ctr
] = core
[ctr
];
4259 core
[ctr
] = curr
[ctr
]; /* saved for next log */
4261 if (curr
[ctr
] < prev
[ctr
])
4262 delta
[ctr
] = curr
[ctr
] + (~0U - prev
[ctr
]);
4264 delta
[ctr
] = (curr
[ctr
] - prev
[ctr
]);
4266 bytes
+= sprintf(p
+ bytes
, "%12u ", delta
[ctr
]);
4272 typedef union cm3_cnts
{ /* export this in bcm_buzzz.h */
4284 dhd_bcm_buzzz_dump_cntrs6(char *p
, uint32
*core
, uint32
*log
)
4288 uint32 cyccnt
, instrcnt
;
4289 cm3_cnts_t cm3_cnts
;
4292 { /* 32bit cyccnt */
4293 uint32 curr
, prev
, delta
;
4294 prev
= core
[0]; curr
= *log
++; core
[0] = curr
;
4296 delta
= curr
+ (~0U - prev
);
4298 delta
= (curr
- prev
);
4300 bytes
+= sprintf(p
+ bytes
, "%12u ", delta
);
4304 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
4307 cm3_cnts_t curr
, prev
, delta
;
4308 prev
.u32
= core
[1]; curr
.u32
= * log
++; core
[1] = curr
.u32
;
4309 for (i
= 0; i
< 4; i
++) {
4310 if (curr
.u8
[i
] < prev
.u8
[i
])
4311 delta
.u8
[i
] = curr
.u8
[i
] + (max8
- prev
.u8
[i
]);
4313 delta
.u8
[i
] = (curr
.u8
[i
] - prev
.u8
[i
]);
4314 bytes
+= sprintf(p
+ bytes
, "%4u ", delta
.u8
[i
]);
4316 cm3_cnts
.u32
= delta
.u32
;
4319 { /* Extract the foldcnt from arg0 */
4320 uint8 curr
, prev
, delta
, max8
= ~0;
4321 bcm_buzzz_arg0_t arg0
; arg0
.u32
= *log
;
4322 prev
= core
[2]; curr
= arg0
.klog
.cnt
; core
[2] = curr
;
4324 delta
= curr
+ (max8
- prev
);
4326 delta
= (curr
- prev
);
4327 bytes
+= sprintf(p
+ bytes
, "%4u ", delta
);
4331 instrcnt
= cyccnt
- (cm3_cnts
.u8
[0] + cm3_cnts
.u8
[1] + cm3_cnts
.u8
[2]
4332 + cm3_cnts
.u8
[3]) + foldcnt
;
4333 if (instrcnt
> 0xFFFFFF00)
4334 bytes
+= sprintf(p
+ bytes
, "[%10s] ", "~");
4336 bytes
+= sprintf(p
+ bytes
, "[%10u] ", instrcnt
);
4341 dhd_buzzz_dump_log(char *p
, uint32
*core
, uint32
*log
, bcm_buzzz_t
*buzzz
)
4344 bcm_buzzz_arg0_t arg0
;
4345 static uint8
* fmt
[] = BCM_BUZZZ_FMT_STRINGS
;
4347 if (buzzz
->counters
== 6) {
4348 bytes
+= dhd_bcm_buzzz_dump_cntrs6(p
, core
, log
);
4349 log
+= 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4351 bytes
+= dhd_buzzz_dump_cntrs(p
, core
, log
, buzzz
->counters
);
4352 log
+= buzzz
->counters
; /* (N x 32bit) CR4=3, CA7=4 */
4355 /* Dump the logged arguments using the registered formats */
4358 switch (arg0
.klog
.args
) {
4360 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
]);
4364 uint32 arg1
= *log
++;
4365 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
);
4371 arg1
= *log
++; arg2
= *log
++;
4372 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
);
4377 uint32 arg1
, arg2
, arg3
;
4378 arg1
= *log
++; arg2
= *log
++; arg3
= *log
++;
4379 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
, arg3
);
4384 uint32 arg1
, arg2
, arg3
, arg4
;
4385 arg1
= *log
++; arg2
= *log
++;
4386 arg3
= *log
++; arg4
= *log
++;
4387 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
, arg3
, arg4
);
4391 printf("Maximum one argument supported\n");
4395 bytes
+= sprintf(p
+ bytes
, "\n");
4400 void dhd_buzzz_dump(bcm_buzzz_t
*buzzz_p
, void *buffer_p
, char *p
)
4403 uint32 total
, part1
, part2
, log_sz
, core
[BCM_BUZZZ_COUNTERS_MAX
];
4406 for (i
= 0; i
< BCM_BUZZZ_COUNTERS_MAX
; i
++) {
4410 log_sz
= buzzz_p
->log_sz
;
4412 part1
= ((uint32
)buzzz_p
->cur
- (uint32
)buzzz_p
->log
) / log_sz
;
4414 if (buzzz_p
->wrap
== TRUE
) {
4415 part2
= ((uint32
)buzzz_p
->end
- (uint32
)buzzz_p
->cur
) / log_sz
;
4416 total
= (buzzz_p
->buffer_sz
- BCM_BUZZZ_LOGENTRY_MAXSZ
) / log_sz
;
4419 total
= buzzz_p
->count
;
4423 printf("bcm_buzzz_dump total<%u> done\n", total
);
4426 printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
4427 total
, part2
, part1
);
4430 if (part2
) { /* with wrap */
4431 log
= (void*)((size_t)buffer_p
+ (buzzz_p
->cur
- buzzz_p
->log
));
4432 while (part2
--) { /* from cur to end : part2 */
4434 dhd_buzzz_dump_log(p
, core
, (uint32
*)log
, buzzz_p
);
4436 log
= (void*)((size_t)log
+ buzzz_p
->log_sz
);
4440 log
= (void*)buffer_p
;
4443 dhd_buzzz_dump_log(p
, core
, (uint32
*)log
, buzzz_p
);
4445 log
= (void*)((size_t)log
+ buzzz_p
->log_sz
);
4448 printf("bcm_buzzz_dump done.\n");
4451 int dhd_buzzz_dump_dngl(dhd_bus_t
*bus
)
4453 bcm_buzzz_t
* buzzz_p
= NULL
;
4454 void * buffer_p
= NULL
;
4455 char * page_p
= NULL
;
4456 pciedev_shared_t
*sh
;
4459 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
4460 return BCME_UNSUPPORTED
;
4462 if ((page_p
= (char *)MALLOC(bus
->dhd
->osh
, 4096)) == NULL
) {
4463 printf("Page memory allocation failure\n");
4466 if ((buzzz_p
= MALLOC(bus
->dhd
->osh
, sizeof(bcm_buzzz_t
))) == NULL
) {
4467 printf("BCM BUZZZ memory allocation failure\n");
4471 ret
= dhdpcie_readshared(bus
);
4473 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__
));
4479 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__
, sh
->buzz_dbg_ptr
));
4481 if (sh
->buzz_dbg_ptr
!= 0U) { /* Fetch and display dongle BUZZZ Trace */
4483 dhdpcie_bus_membytes(bus
, FALSE
, (ulong
)sh
->buzz_dbg_ptr
,
4484 (uint8
*)buzzz_p
, sizeof(bcm_buzzz_t
));
4486 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
4487 "count<%u> status<%u> wrap<%u>\n"
4488 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
4489 (int)sh
->buzz_dbg_ptr
,
4490 (int)buzzz_p
->log
, (int)buzzz_p
->cur
, (int)buzzz_p
->end
,
4491 buzzz_p
->count
, buzzz_p
->status
, buzzz_p
->wrap
,
4492 buzzz_p
->cpu_idcode
, buzzz_p
->counters
, buzzz_p
->group
,
4493 buzzz_p
->buffer_sz
, buzzz_p
->log_sz
);
4495 if (buzzz_p
->count
== 0) {
4496 printf("Empty dongle BUZZZ trace\n\n");
4500 /* Allocate memory for trace buffer and format strings */
4501 buffer_p
= MALLOC(bus
->dhd
->osh
, buzzz_p
->buffer_sz
);
4502 if (buffer_p
== NULL
) {
4503 printf("Buffer memory allocation failure\n");
4507 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
4508 dhdpcie_bus_membytes(bus
, FALSE
, (uint32
)buzzz_p
->log
, /* Trace */
4509 (uint8
*)buffer_p
, buzzz_p
->buffer_sz
);
4511 /* Process and display the trace using formatted output */
4515 for (ctr
= 0; ctr
< buzzz_p
->counters
; ctr
++) {
4516 printf("<Evt[%02X]> ", buzzz_p
->eventid
[ctr
]);
4518 printf("<code execution point>\n");
4521 dhd_buzzz_dump(buzzz_p
, buffer_p
, page_p
);
4523 printf("----- End of dongle BCM BUZZZ Trace -----\n\n");
4525 MFREE(bus
->dhd
->osh
, buffer_p
, buzzz_p
->buffer_sz
); buffer_p
= NULL
;
4530 if (page_p
) MFREE(bus
->dhd
->osh
, page_p
, 4096);
4531 if (buzzz_p
) MFREE(bus
->dhd
->osh
, buzzz_p
, sizeof(bcm_buzzz_t
));
4532 if (buffer_p
) MFREE(bus
->dhd
->osh
, buffer_p
, buzzz_p
->buffer_sz
);
4536 #endif /* BCM_BUZZZ */
4538 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
4539 ((sih)->buscoretype == PCIE2_CORE_ID))
4541 #define PCIE_FLR_CAPAB_BIT 28
4542 #define PCIE_FUNCTION_LEVEL_RESET_BIT 15
4544 /* Change delays for only QT HW, FPGA and silicon uses same delay */
4546 #define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
4547 #define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
4549 #define DHD_FUNCTION_LEVEL_RESET_DELAY 55u /* 55 msec delay */
4550 #define DHD_SSRESET_STATUS_RETRY_DELAY 40u
4552 #define DHD_SSRESET_STATUS_RETRIES 50u
4555 dhd_bus_perform_flr(dhd_bus_t
*bus
, bool force_fail
)
4561 DHD_ERROR(("******** Perform FLR ********\n"));
4563 /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
4564 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CAPABILITY
, sizeof(val
));
4565 flr_capab
= val
& (1 << PCIE_FLR_CAPAB_BIT
);
4566 DHD_ERROR(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
4567 PCIE_CFG_DEVICE_CAPABILITY
, val
, flr_capab
));
4569 DHD_ERROR(("Chip does not support FLR\n"));
4570 return BCME_UNSUPPORTED
;
4573 /* Save pcie config space */
4574 DHD_ERROR(("Save Pcie Config Space\n"));
4575 DHD_PCIE_CONFIG_SAVE(bus
);
4577 /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
4578 DHD_ERROR(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4579 PCIE_FUNCTION_LEVEL_RESET_BIT
, PCIE_CFG_DEVICE_CONTROL
));
4580 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
));
4581 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4582 val
= val
| (1 << PCIE_FUNCTION_LEVEL_RESET_BIT
);
4583 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4584 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
), val
);
4586 /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
4587 DHD_ERROR(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY
));
4588 OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY
* 1000u);
4591 DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
4592 PCIE_SSRESET_DISABLE_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
4593 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
4594 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
4596 val
= val
| (1 << PCIE_SSRESET_DISABLE_BIT
);
4597 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
4599 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
), val
);
4601 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
4602 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
4606 /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
4607 DHD_ERROR(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
4608 PCIE_FUNCTION_LEVEL_RESET_BIT
, PCIE_CFG_DEVICE_CONTROL
));
4609 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
));
4610 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4611 val
= val
& ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT
);
4612 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
4613 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
), val
);
4615 /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
4616 DHD_ERROR(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
4617 "is cleared\n", PCIE_SSRESET_STATUS_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
4619 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
4620 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
4621 PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
4622 val
= val
& (1 << PCIE_SSRESET_STATUS_BIT
);
4623 OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY
);
4624 } while (val
&& (retry
++ < DHD_SSRESET_STATUS_RETRIES
));
4627 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
4628 PCIE_CFG_SUBSYSTEM_CONTROL
, PCIE_SSRESET_STATUS_BIT
));
4629 /* User has to fire the IOVAR again, if force_fail is needed */
4631 bus
->flr_force_fail
= FALSE
;
4632 DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__
));
4637 /* Restore pcie config space */
4638 DHD_ERROR(("Restore Pcie Config Space\n"));
4639 DHD_PCIE_CONFIG_RESTORE(bus
);
4641 DHD_ERROR(("******** FLR Succedeed ********\n"));
#ifdef DHD_USE_BP_RESET
#define DHD_BP_RESET_ASPM_DISABLE_DELAY		500u	/* usec */

#define DHD_BP_RESET_STATUS_RETRY_DELAY		40u	/* usec */
#define DHD_BP_RESET_STATUS_RETRIES		50u

#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT	10
#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT	21

/**
 * Perform a dongle backplane (BP) reset through PCIe config space:
 *  1. disable ASPM (clear bits 1-0 of the Link Control/Status register),
 *  2. set the SB-reset bit (bit 10) in PCIECFGREG_SPROM_CTRL,
 *  3. poll until that bit clears, then poll until the BP-reset status bit
 *     (bit 21) of the DAR clock-control register clears,
 *  4. re-enable ASPM L1.
 *
 * @param bus  DHD bus handle; bus->osh and bus->sih must be valid.
 * @return BCME_OK on success, BCME_ERROR if a status bit never cleared
 *         within DHD_BP_RESET_STATUS_RETRIES polls.
 *
 * NOTE(review): this block was reconstructed — the corrupted extraction had
 * dropped the function framing, both do/while loop headers, the error
 * branches and the aspm_enab label. Verify against the upstream driver.
 */
int
dhd_bus_perform_bp_reset(struct dhd_bus *bus)
{
	uint32 val;
	int retry = 0;	/* shared poll budget for both wait loops */
	uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
	int ret = BCME_OK;
	bool cond;

	DHD_ERROR(("******** Perform BP reset ********\n"));

	/* Disable ASPM before touching the reset bits */
	DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
		PCIECFGREG_LINK_STATUS_CTRL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	val = val & (~PCIE_ASPM_ENAB);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);

	/* wait for delay usec so the ASPM disable settles */
	DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
	OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);

	/* Set bit 10 of PCIECFGREG_SPROM_CTRL to request the backplane reset */
	DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
	val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);

	/* Wait till bit backplane reset is ASSERTED i,e
	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
	 * Only after this, poll for 21st bit of DAR reg 0xAE0 is valid
	 * else DAR register will read previous old value
	 */
	DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
		"PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
	do {
		val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
		DHD_INFO(("read_config: reg=0x%x read val=0x%x\n",
			PCIECFGREG_SPROM_CTRL, val));
		cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));

	if (cond) {
		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
			PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
		ret = BCME_ERROR;
		/* still re-enable ASPM before returning the error */
		goto aspm_enab;
	}

	/* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
	DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
		"dar_clk_ctrl_status_reg(0x%x) is cleared\n",
		PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
	do {
		val = si_corereg(bus->sih, bus->sih->buscoreidx,
			dar_clk_ctrl_status_reg, 0, 0);
		DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
			dar_clk_ctrl_status_reg, val));
		cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));

	if (cond) {
		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
			dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
		ret = BCME_ERROR;
	}

aspm_enab:
	/* Re-enable ASPM L1 */
	DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
		PCIECFGREG_LINK_STATUS_CTRL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	val = val | (PCIE_ASPM_L1_ENAB);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);

	DHD_ERROR(("******** BP reset Succedeed ********\n"));

	return ret;
}
#endif /* DHD_USE_BP_RESET */
4745 dhd_bus_devreset(dhd_pub_t
*dhdp
, uint8 flag
)
4747 dhd_bus_t
*bus
= dhdp
->bus
;
4749 unsigned long flags
;
4750 unsigned long flags_bus
;
4751 #ifdef CONFIG_ARCH_MSM
4752 int retry
= POWERUP_MAX_RETRY
;
4753 #endif /* CONFIG_ARCH_MSM */
4755 if (flag
== TRUE
) { /* Turn off WLAN */
4756 /* Removing Power */
4757 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__
));
4759 bus
->dhd
->up
= FALSE
;
4761 /* wait for other contexts to finish -- if required a call
4762 * to OSL_DELAY for 1s can be added to give other contexts
4763 * a chance to finish
4765 dhdpcie_advertise_bus_cleanup(bus
->dhd
);
4767 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
4768 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4769 atomic_set(&bus
->dhd
->block_bus
, TRUE
);
4770 dhd_flush_rx_tx_wq(bus
->dhd
);
4771 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4773 #ifdef BCMPCIE_OOB_HOST_WAKE
4774 /* Clean up any pending host wake IRQ */
4775 dhd_bus_oob_intr_set(bus
->dhd
, FALSE
);
4776 dhd_bus_oob_intr_unregister(bus
->dhd
);
4777 #endif /* BCMPCIE_OOB_HOST_WAKE */
4778 dhd_os_wd_timer(dhdp
, 0);
4779 dhd_bus_stop(bus
, TRUE
);
4781 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
4782 dhdpcie_bus_intr_disable(bus
);
4783 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
4784 dhdpcie_free_irq(bus
);
4786 dhd_deinit_bus_lock(bus
);
4787 dhd_bus_release_dongle(bus
);
4788 dhdpcie_bus_free_resource(bus
);
4789 bcmerror
= dhdpcie_bus_disable_device(bus
);
4791 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4792 __FUNCTION__
, bcmerror
));
4793 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4794 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
4795 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4797 /* Clean up protocol data after Bus Master Enable bit clear
4798 * so that host can safely unmap DMA and remove the allocated buffers
4799 * from the PKTID MAP. Some Applicantion Processors supported
4800 * System MMU triggers Kernel panic when they detect to attempt to
4801 * DMA-unmapped memory access from the devices which use the
4802 * System MMU. Therefore, Kernel panic can be happened since it is
4803 * possible that dongle can access to DMA-unmapped memory after
4804 * calling the dhd_prot_reset().
4805 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4806 * should be located after the dhdpcie_bus_disable_device().
4808 dhd_prot_reset(dhdp
);
4810 #ifdef CONFIG_ARCH_MSM
4811 bcmerror
= dhdpcie_bus_clock_stop(bus
);
4813 DHD_ERROR(("%s: host clock stop failed: %d\n",
4814 __FUNCTION__
, bcmerror
));
4815 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4816 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
4817 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4820 #endif /* CONFIG_ARCH_MSM */
4821 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
4822 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
4823 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
4824 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4825 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
4826 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4829 dhdpcie_free_irq(bus
);
4831 #ifdef BCMPCIE_OOB_HOST_WAKE
4832 /* Clean up any pending host wake IRQ */
4833 dhd_bus_oob_intr_set(bus
->dhd
, FALSE
);
4834 dhd_bus_oob_intr_unregister(bus
->dhd
);
4835 #endif /* BCMPCIE_OOB_HOST_WAKE */
4836 dhd_dpc_kill(bus
->dhd
);
4837 if (!bus
->no_bus_init
) {
4838 dhd_bus_release_dongle(bus
);
4839 dhdpcie_bus_free_resource(bus
);
4840 bcmerror
= dhdpcie_bus_disable_device(bus
);
4842 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
4843 __FUNCTION__
, bcmerror
));
4846 /* Clean up protocol data after Bus Master Enable bit clear
4847 * so that host can safely unmap DMA and remove the allocated
4848 * buffers from the PKTID MAP. Some Applicantion Processors
4849 * supported System MMU triggers Kernel panic when they detect
4850 * to attempt to DMA-unmapped memory access from the devices
4851 * which use the System MMU.
4852 * Therefore, Kernel panic can be happened since it is possible
4853 * that dongle can access to DMA-unmapped memory after calling
4854 * the dhd_prot_reset().
4855 * For this reason, the dhd_prot_reset() and dhd_clear() functions
4856 * should be located after the dhdpcie_bus_disable_device().
4858 dhd_prot_reset(dhdp
);
4861 bus
->no_bus_init
= FALSE
;
4863 #ifdef CONFIG_ARCH_MSM
4864 bcmerror
= dhdpcie_bus_clock_stop(bus
);
4866 DHD_ERROR(("%s: host clock stop failed: %d\n",
4867 __FUNCTION__
, bcmerror
));
4870 #endif /* CONFIG_ARCH_MSM */
4873 bus
->dhd
->dongle_reset
= TRUE
;
4874 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__
));
4876 } else { /* Turn on WLAN */
4877 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
4879 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__
));
4880 #ifdef CONFIG_ARCH_MSM
4882 bcmerror
= dhdpcie_bus_clock_start(bus
);
4884 DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
4892 if (bcmerror
&& !retry
) {
4893 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
4894 __FUNCTION__
, bcmerror
));
4897 #endif /* CONFIG_ARCH_MSM */
4898 bus
->is_linkdown
= 0;
4899 #ifdef SUPPORT_LINKDOWN_RECOVERY
4900 bus
->read_shm_fail
= FALSE
;
4901 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4902 bcmerror
= dhdpcie_bus_enable_device(bus
);
4904 DHD_ERROR(("%s: host configuration restore failed: %d\n",
4905 __FUNCTION__
, bcmerror
));
4909 bcmerror
= dhdpcie_bus_alloc_resource(bus
);
4911 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
4912 __FUNCTION__
, bcmerror
));
4916 bcmerror
= dhdpcie_bus_dongle_attach(bus
);
4918 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
4919 __FUNCTION__
, bcmerror
));
4923 bcmerror
= dhd_bus_request_irq(bus
);
4925 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
4926 __FUNCTION__
, bcmerror
));
4930 bus
->dhd
->dongle_reset
= FALSE
;
4932 bcmerror
= dhd_bus_start(dhdp
);
4934 DHD_ERROR(("%s: dhd_bus_start: %d\n",
4935 __FUNCTION__
, bcmerror
));
4939 bus
->dhd
->up
= TRUE
;
4940 /* Renabling watchdog which is disabled in dhdpcie_advertise_bus_cleanup */
4941 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
4942 DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
4944 dhd_os_wd_timer(bus
->dhd
, bus
->dhd
->dhd_watchdog_ms_backup
);
4946 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__
));
4948 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__
));
4955 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
4956 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
4957 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
4963 dhdpcie_get_dma_ring_indices(dhd_pub_t
*dhd
)
4965 int h2d_support
, d2h_support
;
4967 d2h_support
= dhd
->dma_d2h_ring_upd_support
? 1 : 0;
4968 h2d_support
= dhd
->dma_h2d_ring_upd_support
? 1 : 0;
4969 return (d2h_support
| (h2d_support
<< 1));
4973 dhdpcie_set_dma_ring_indices(dhd_pub_t
*dhd
, int32 int_val
)
4976 /* Can change it only during initialization/FW download */
4977 if (dhd
->busstate
== DHD_BUS_DOWN
) {
4978 if ((int_val
> 3) || (int_val
< 0)) {
4979 DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
4980 bcmerror
= BCME_BADARG
;
4982 dhd
->dma_d2h_ring_upd_support
= (int_val
& 1) ? TRUE
: FALSE
;
4983 dhd
->dma_h2d_ring_upd_support
= (int_val
& 2) ? TRUE
: FALSE
;
4984 dhd
->dma_ring_upd_overwrite
= TRUE
;
4987 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4989 bcmerror
= BCME_NOTDOWN
;
/**
 * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
 *
 * NOTE(review): this block was damaged in extraction. The opening brace,
 * local variable declarations, the "switch (actionid) {" statement, all
 * "break;"/"goto exit;" lines, the "exit:" label/return and several
 * "if"/"}" lines inside individual cases were dropped. Only comments are
 * added below; all remaining code tokens are unchanged. Restore the missing
 * control flow from the upstream driver before building.
 *
 * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
 * @param params input buffer
 * @param plen length in [bytes] of input buffer 'params'
 * @param arg output buffer
 * @param len length in [bytes] of output buffer 'arg'
 */
dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
	void *params, int plen, void *arg, int len, int val_size)
DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
	__FUNCTION__, actionid, name, params, plen, arg, len, val_size));

/* Validate buffer lengths for this IOVAR; on failure the dropped
 * "goto exit;" used to bail out here.
 */
if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)

/* Copy up to three int32 parameters out of the caller's input buffer */
if (plen >= (int)sizeof(int_val))
	bcopy(params, &int_val, sizeof(int_val));

if (plen >= (int)sizeof(int_val) * 2)
	bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));

if (plen >= (int)sizeof(int_val) * 3)
	bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));

bool_val = (int_val != 0) ? TRUE : FALSE;

/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
	actionid == IOV_GVAL(IOV_DEVRESET))) {
	bcmerror = BCME_NOTREADY;

/* NOTE(review): the "switch (actionid) {" line was dropped here */
case IOV_SVAL(IOV_VARS):
	/* Download NVRAM variables to the dongle */
	bcmerror = dhdpcie_downloadvars(bus, arg, len);

case IOV_SVAL(IOV_PCIE_LPBK):
	bcmerror = dhdpcie_bus_lpback_req(bus, int_val);

case IOV_SVAL(IOV_PCIE_DMAXFER): {
	/* Optional extra parameters: destination delay, wait flag, core number */
	if (plen >= (int)sizeof(int_val) * 4) {
		bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
			&int_val4, sizeof(int_val4));
	if (plen >= (int)sizeof(int_val) * 5) {
		bcopy((void*)((uintptr)params + 4 * sizeof(int_val)),
			&wait, sizeof(wait));
	if (plen >= (int)sizeof(core_num) * 6) {
		bcopy((void*)((uintptr)params + 5 * sizeof(core_num)),
			&core_num, sizeof(core_num));
	bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3,
		int_val4, core_num, wait);
	if (wait && bcmerror >= 0) {
		/* get the status of the dma transfer */
		int_val4 = dhdmsgbuf_dmaxfer_status(bus->dhd);
		bcopy(&int_val4, params, sizeof(int_val));

case IOV_GVAL(IOV_PCIE_DMAXFER): {
	/* NOTE(review): the declaration of dma_status was dropped here */
	dma_status = dhdmsgbuf_dmaxfer_status(bus->dhd);
	bcopy(&dma_status, arg, val_size);

case IOV_GVAL(IOV_PCIE_SUSPEND):
	int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_PCIE_SUSPEND):
	if (bool_val) { /* Suspend */
		unsigned long flags;

		/* If some other context is busy, wait until they are done,
		 * before starting suspend
		 */
		ret = dhd_os_busbusy_wait_condition(bus->dhd,
			&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
		/* NOTE(review): the "if (ret == 0) {" guard around this error
		 * path was dropped
		 */
		DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
			__FUNCTION__, bus->dhd->dhd_bus_busy_state));

		DHD_GENERAL_LOCK(bus->dhd, flags);
		DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
		dhdpcie_bus_suspend(bus, TRUE, TRUE);
		/* NOTE(review): the "#else" between the two calls was dropped */
		dhdpcie_bus_suspend(bus, TRUE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

		DHD_GENERAL_LOCK(bus->dhd, flags);
		DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
		dhd_os_busbusy_wake(bus->dhd);
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
	} else { /* Resume */
		unsigned long flags;
		DHD_GENERAL_LOCK(bus->dhd, flags);
		DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
		DHD_GENERAL_UNLOCK(bus->dhd, flags);

		dhdpcie_bus_suspend(bus, FALSE);

		DHD_GENERAL_LOCK(bus->dhd, flags);
		DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
		dhd_os_busbusy_wake(bus->dhd);
		DHD_GENERAL_UNLOCK(bus->dhd, flags);

case IOV_GVAL(IOV_MEMSIZE):
	int_val = (int32)bus->ramsize;
	bcopy(&int_val, arg, val_size);

/* Debug related. Dumps core registers or one of the dongle memory */
case IOV_GVAL(IOV_DUMP_DONGLE):
	dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
	dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
	uint32 *p = ddo->val;
	const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */

	/* NOTE(review): sizeof(ddo) here is the size of a pointer, not of
	 * dump_dongle_out_t — confirm against upstream.
	 */
	if (plen < sizeof(ddi) || len < sizeof(ddo)) {
		bcmerror = BCME_BADARG;

	/* NOTE(review): the "switch (ddi.type) {" line was dropped here */
	case DUMP_DONGLE_COREREG:
		if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
			break; // beyond last core: core enumeration ended
		ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
		ddo->address += ddi.offset; // BP address at which this dump starts

		ddo->id = si_coreid(bus->sih);
		ddo->rev = si_corerev(bus->sih);

		/* Read registers until the 4 KB register window or the output
		 * buffer is exhausted
		 */
		while (ddi.offset < max_offset &&
			sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
			*p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
			ddi.offset += sizeof(uint32);
			ddo->n_bytes += sizeof(uint32);

		// TODO: implement d11 SHM/TPL dumping
		bcmerror = BCME_BADARG;

/* Debug related. Returns a string with dongle capabilities */
case IOV_GVAL(IOV_DNGL_CAPS):
	/* NOTE(review): strncpy may leave 'arg' unterminated; the forced
	 * NUL write on the last byte below compensates.
	 */
	strncpy(arg, bus->dhd->fw_capabilities,
		MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
	((char*)arg)[len - 1] = '\0';

#if defined(DEBUGGER) || defined(DHD_DSCOPE)
case IOV_SVAL(IOV_GDB_SERVER):
	/* debugger_*() functions may sleep, so cannot hold spinlock */
	DHD_PERIM_UNLOCK(bus->dhd);
	debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
	DHD_PERIM_LOCK(bus->dhd);
#endif /* DEBUGGER || DHD_DSCOPE */

/* NOTE(review): the matching "#ifdef BCM_BUZZZ" opener was dropped */
/* Dump dongle side buzzz trace to console */
case IOV_GVAL(IOV_BUZZZ_DUMP):
	bcmerror = dhd_buzzz_dump_dngl(bus);
#endif /* BCM_BUZZZ */

case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
	bcmerror = dhdpcie_bus_download_state(bus, bool_val);

case IOV_GVAL(IOV_RAMSIZE):
	int_val = (int32)bus->ramsize;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_RAMSIZE):
	bus->ramsize = int_val;
	bus->orig_ramsize = int_val;

case IOV_GVAL(IOV_RAMSTART):
	int_val = (int32)bus->dongle_ram_base;
	bcopy(&int_val, arg, val_size);

case IOV_GVAL(IOV_CC_NVMSHADOW):
	struct bcmstrbuf dump_b;

	bcm_binit(&dump_b, arg, len);
	bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);

case IOV_GVAL(IOV_SLEEP_ALLOWED):
	bool_val = bus->sleep_allowed;
	bcopy(&bool_val, arg, val_size);

case IOV_SVAL(IOV_SLEEP_ALLOWED):
	bus->sleep_allowed = bool_val;

case IOV_GVAL(IOV_DONGLEISOLATION):
	int_val = bus->dhd->dongle_isolation;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_DONGLEISOLATION):
	bus->dhd->dongle_isolation = bool_val;

case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
	int_val = bus->ltrsleep_on_unload;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
	bus->ltrsleep_on_unload = bool_val;

case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
	struct bcmstrbuf dump_b;
	bcm_binit(&dump_b, arg, len);
	bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);

case IOV_GVAL(IOV_DMA_RINGINDICES):
	int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
	bcopy(&int_val, arg, sizeof(int_val));

case IOV_SVAL(IOV_DMA_RINGINDICES):
	bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);

case IOV_GVAL(IOV_METADATA_DBG):
	int_val = dhd_prot_metadata_dbg_get(bus->dhd);
	bcopy(&int_val, arg, val_size);
case IOV_SVAL(IOV_METADATA_DBG):
	dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));

case IOV_GVAL(IOV_RX_METADATALEN):
	int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_RX_METADATALEN):
	/* NOTE(review): the length-limit "if" guarding this error was dropped */
	bcmerror = BCME_BUFTOOLONG;
	dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);

case IOV_SVAL(IOV_TXP_THRESHOLD):
	dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);

case IOV_GVAL(IOV_TXP_THRESHOLD):
	int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_DB1_FOR_MB):
	/* NOTE(review): the "if (int_val)/else" selecting between these two
	 * assignments was dropped
	 */
	bus->db1_for_mb = TRUE;
	bus->db1_for_mb = FALSE;

case IOV_GVAL(IOV_DB1_FOR_MB):
	if (bus->db1_for_mb)
	/* NOTE(review): the int_val = 1/else int_val = 0 lines were dropped */
	bcopy(&int_val, arg, val_size);

case IOV_GVAL(IOV_TX_METADATALEN):
	int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_TX_METADATALEN):
	/* NOTE(review): the length-limit "if" guarding this error was dropped */
	bcmerror = BCME_BUFTOOLONG;
	dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);

case IOV_SVAL(IOV_DEVRESET):
	/* NOTE(review): the "switch (int_val) {" line was dropped here */
	case DHD_BUS_DEVRESET_ON:
		bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
	case DHD_BUS_DEVRESET_OFF:
		bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
	case DHD_BUS_DEVRESET_FLR:
		bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
	case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
		bus->flr_force_fail = TRUE;
	/* default: unknown sub-command */
	DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));

case IOV_SVAL(IOV_FORCE_FW_TRAP):
	if (bus->dhd->busstate == DHD_BUS_DATA)
		dhdpcie_fw_trap(bus);
	/* NOTE(review): the "else {" before this error path was dropped */
	DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
	bcmerror = BCME_NOTUP;

case IOV_GVAL(IOV_FLOW_PRIO_MAP):
	int_val = bus->dhd->flow_prio_map_type;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_FLOW_PRIO_MAP):
	int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
	bcopy(&int_val, arg, val_size);

#ifdef DHD_PCIE_RUNTIMEPM
case IOV_GVAL(IOV_IDLETIME):
	int_val = bus->idletime;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_IDLETIME):
	/* NOTE(review): the negative-value "if" guard and the surrounding
	 * else were dropped
	 */
	bcmerror = BCME_BADARG;
	bus->idletime = int_val;
	if (bus->idletime) {
		DHD_ENABLE_RUNTIME_PM(bus->dhd);
		/* NOTE(review): the "} else {" between enable/disable was dropped */
		DHD_DISABLE_RUNTIME_PM(bus->dhd);
#endif /* DHD_PCIE_RUNTIMEPM */

case IOV_GVAL(IOV_TXBOUND):
	int_val = (int32)dhd_txbound;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_TXBOUND):
	dhd_txbound = (uint)int_val;

case IOV_SVAL(IOV_H2D_MAILBOXDATA):
	dhdpcie_send_mb_data(bus, (uint)int_val);

case IOV_SVAL(IOV_INFORINGS):
	dhd_prot_init_info_rings(bus->dhd);

case IOV_SVAL(IOV_H2D_PHASE):
	/* Only changeable before FW download */
	if (bus->dhd->busstate != DHD_BUS_DOWN) {
		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
		bcmerror = BCME_NOTDOWN;
	/* NOTE(review): the "if (int_val)/else" selecting these was dropped */
	bus->dhd->h2d_phase_supported = TRUE;
	bus->dhd->h2d_phase_supported = FALSE;

case IOV_GVAL(IOV_H2D_PHASE):
	int_val = (int32) bus->dhd->h2d_phase_supported;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
	if (bus->dhd->busstate != DHD_BUS_DOWN) {
		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
		bcmerror = BCME_NOTDOWN;
	/* NOTE(review): the "if (int_val)/else" selecting these was dropped */
	bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
	bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;

case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
	int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
	if (bus->dhd->busstate != DHD_BUS_DOWN) {
		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
		bcmerror = BCME_NOTDOWN;
	dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);

case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
	int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
	bcopy(&int_val, arg, val_size);

case IOV_GVAL(IOV_RXBOUND):
	int_val = (int32)dhd_rxbound;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_RXBOUND):
	dhd_rxbound = (uint)int_val;

case IOV_GVAL(IOV_TRAPDATA):
	struct bcmstrbuf dump_b;
	bcm_binit(&dump_b, arg, len);
	bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);

case IOV_GVAL(IOV_TRAPDATA_RAW):
	struct bcmstrbuf dump_b;
	bcm_binit(&dump_b, arg, len);
	bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);

case IOV_SVAL(IOV_HANGREPORT):
	bus->dhd->hang_report = bool_val;
	DHD_ERROR(("%s: Set hang_report as %d\n",
		__FUNCTION__, bus->dhd->hang_report));

case IOV_GVAL(IOV_HANGREPORT):
	int_val = (int32)bus->dhd->hang_report;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_CTO_PREVENTION):
	/* CTO prevention needs PCIe core rev >= 19 */
	if (bus->sih->buscorerev < 19) {
		bcmerror = BCME_UNSUPPORTED;
	/* Read the link status through the indirect config address/data pair */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
	pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configdata), 0, 0);

	/* rev 19 at Gen1 link speed cannot use CTO prevention */
	if ((bus->sih->buscorerev == 19) &&
		(((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
		PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
		bcmerror = BCME_UNSUPPORTED;
	bus->cto_enable = bool_val;
	dhdpcie_cto_init(bus, bus->cto_enable);
	DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
		__FUNCTION__, bus->cto_enable));

case IOV_GVAL(IOV_CTO_PREVENTION):
	if (bus->sih->buscorerev < 19) {
		bcmerror = BCME_UNSUPPORTED;
	int_val = (int32)bus->cto_enable;
	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_CTO_THRESHOLD):
	if (bus->sih->buscorerev < 19) {
		bcmerror = BCME_UNSUPPORTED;
	bus->cto_threshold = (uint32)int_val;

case IOV_GVAL(IOV_CTO_THRESHOLD):
	if (bus->sih->buscorerev < 19) {
		bcmerror = BCME_UNSUPPORTED;
	if (bus->cto_threshold)
		int_val = (int32)bus->cto_threshold;
	/* NOTE(review): the "else" before the default fallback was dropped */
		int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;

	bcopy(&int_val, arg, val_size);

case IOV_SVAL(IOV_PCIE_WD_RESET):
	/* rev 66 resets only function 0; other revs reset all PCIe functions */
	uint32 wd_en = (bus->sih->buscorerev == 66) ? WD_SSRESET_PCIE_F0_EN :
		(WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
	pcie_watchdog_reset(bus->osh, bus->sih,
		WD_ENABLE_MASK, wd_en);

case IOV_GVAL(IOV_IDMA_ENABLE):
	int_val = bus->idma_enabled;
	bcopy(&int_val, arg, val_size);
case IOV_SVAL(IOV_IDMA_ENABLE):
	bus->idma_enabled = (bool)int_val;
case IOV_GVAL(IOV_IFRM_ENABLE):
	int_val = bus->ifrm_enabled;
	bcopy(&int_val, arg, val_size);
case IOV_SVAL(IOV_IFRM_ENABLE):
	bus->ifrm_enabled = (bool)int_val;
case IOV_GVAL(IOV_CLEAR_RING):
	bcopy(&int_val, arg, val_size);
	dhd_flow_rings_flush(bus->dhd, 0);
case IOV_GVAL(IOV_DAR_ENABLE):
	int_val = bus->dar_enabled;
	bcopy(&int_val, arg, val_size);
case IOV_SVAL(IOV_DAR_ENABLE):
	bus->dar_enabled = (bool)int_val;

/* NOTE(review): the matching "#ifdef D2H_MINIDUMP" opener was dropped */
case IOV_GVAL(IOV_MINIDUMP_OVERRIDE):
	int_val = bus->d2h_minidump_override;
	bcopy(&int_val, arg, val_size);
case IOV_SVAL(IOV_MINIDUMP_OVERRIDE):
	/* Can change it only before FW download */
	if (bus->dhd->busstate != DHD_BUS_DOWN) {
		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
		bcmerror = BCME_NOTDOWN;
	bus->d2h_minidump_override = (bool)int_val;
#endif /* D2H_MINIDUMP */

	/* default: unknown IOVAR (the "default:" label was dropped) */
	bcmerror = BCME_UNSUPPORTED;

} /* dhdpcie_bus_doiovar */
5615 /** Transfers bytes from host to dongle using pio mode */
5617 dhdpcie_bus_lpback_req(struct dhd_bus
*bus
, uint32 len
)
5619 if (bus
->dhd
== NULL
) {
5620 DHD_ERROR(("bus not inited\n"));
5623 if (bus
->dhd
->prot
== NULL
) {
5624 DHD_ERROR(("prot is not inited\n"));
5627 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
5628 DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
5631 dhdmsgbuf_lpbk_req(bus
->dhd
, len
);
5635 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
5637 dhd_bus_hostready(struct dhd_bus
*bus
)
5639 if (!bus
->dhd
->d2h_hostrdy_supported
) {
5643 if (bus
->is_linkdown
) {
5644 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
5648 DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__
,
5649 dhd_pcie_config_read(bus
->osh
, PCI_CFG_CMD
, sizeof(uint32
))));
5650 if (DAR_PWRREQ(bus
)) {
5651 dhd_bus_pcie_pwr_req(bus
);
5653 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db1_addr_get(bus
), ~0, 0x12345678);
5654 bus
->hostready_count
++;
5655 DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__
, bus
->hostready_count
));
5658 /* Clear INTSTATUS */
5660 dhdpcie_bus_clear_intstatus(struct dhd_bus
*bus
)
5662 uint32 intstatus
= 0;
5663 if ((bus
->sih
->buscorerev
== 6) || (bus
->sih
->buscorerev
== 4) ||
5664 (bus
->sih
->buscorerev
== 2)) {
5665 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
5666 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, intstatus
);
5668 /* this is a PCIE core register..not a config register... */
5669 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, 0, 0);
5670 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, bus
->def_intmask
,
5676 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5677 dhdpcie_bus_suspend(struct dhd_bus
*bus
, bool state
, bool byint
)
5679 dhdpcie_bus_suspend(struct dhd_bus
*bus
, bool state
)
5680 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5684 unsigned long flags
, flags_bus
;
5685 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5686 int d3_read_retry
= 0;
5687 uint32 d2h_mb_data
= 0;
5689 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5691 if (bus
->dhd
== NULL
) {
5692 DHD_ERROR(("bus not inited\n"));
5695 if (bus
->dhd
->prot
== NULL
) {
5696 DHD_ERROR(("prot is not inited\n"));
5700 if (dhd_query_bus_erros(bus
->dhd
)) {
5704 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5705 if (!(bus
->dhd
->busstate
== DHD_BUS_DATA
|| bus
->dhd
->busstate
== DHD_BUS_SUSPEND
)) {
5706 DHD_ERROR(("not in a readystate\n"));
5707 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5710 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5711 if (bus
->dhd
->dongle_reset
) {
5712 DHD_ERROR(("Dongle is in reset state.\n"));
5716 /* Check whether we are already in the requested state.
5717 * state=TRUE means Suspend
5718 * state=FALSE meanse Resume
5720 if (state
== TRUE
&& bus
->dhd
->busstate
== DHD_BUS_SUSPEND
) {
5721 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
5723 } else if (state
== FALSE
&& bus
->dhd
->busstate
== DHD_BUS_DATA
) {
5724 DHD_ERROR(("Bus is already in RESUME state.\n"));
5732 if (bus
->is_linkdown
) {
5733 DHD_ERROR(("%s: PCIe link was down, state=%d\n",
5734 __FUNCTION__
, state
));
5739 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__
));
5741 bus
->dhd
->dhd_watchdog_ms_backup
= dhd_watchdog_ms
;
5742 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
5743 DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
5745 dhd_os_wd_timer(bus
->dhd
, 0);
5748 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5749 if (DHD_BUS_BUSY_CHECK_IN_TX(bus
->dhd
)) {
5750 DHD_ERROR(("Tx Request is not ended\n"));
5751 bus
->dhd
->busstate
= DHD_BUS_DATA
;
5752 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5756 bus
->last_suspend_start_time
= OSL_LOCALTIME_NS();
5758 /* stop all interface network queue. */
5759 dhd_bus_stop_queue(bus
);
5760 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5762 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5764 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
5765 /* Clear wait_for_d3_ack before sending D3_INFORM */
5766 bus
->wait_for_d3_ack
= 0;
5767 dhdpcie_send_mb_data(bus
, H2D_HOST_D3_INFORM
);
5769 timeleft
= dhd_os_d3ack_wait(bus
->dhd
, &bus
->wait_for_d3_ack
);
5770 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
5772 /* Clear wait_for_d3_ack before sending D3_INFORM */
5773 bus
->wait_for_d3_ack
= 0;
5774 dhdpcie_send_mb_data(bus
, H2D_HOST_D3_INFORM
| H2D_HOST_ACK_NOINT
);
5775 while (!bus
->wait_for_d3_ack
&& d3_read_retry
< MAX_D3_ACK_TIMEOUT
) {
5776 dhdpcie_handle_mb_data(bus
);
5777 usleep_range(1000, 1500);
5782 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
5784 /* Clear wait_for_d3_ack before sending D3_INFORM */
5785 bus
->wait_for_d3_ack
= 0;
5787 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
5788 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
5789 * inside atomic context, so that no more DBs will be
5790 * rung after sending D3_INFORM
5792 dhdpcie_send_mb_data(bus
, H2D_HOST_D3_INFORM
);
5794 /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
5796 timeleft
= dhd_os_d3ack_wait(bus
->dhd
, &bus
->wait_for_d3_ack
);
5798 #ifdef DHD_RECOVER_TIMEOUT
5799 if (bus
->wait_for_d3_ack
== 0) {
5800 /* If wait_for_d3_ack was not updated because D2H MB was not received */
5801 uint32 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5802 bus
->pcie_mailbox_int
, 0, 0);
5803 int host_irq_disabled
= dhdpcie_irq_disabled(bus
);
5804 if ((intstatus
) && (intstatus
!= (uint32
)-1) &&
5805 (timeleft
== 0) && (!dhd_query_bus_erros(bus
->dhd
))) {
5806 DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
5807 " host_irq_disabled=%d\n",
5808 __FUNCTION__
, intstatus
, host_irq_disabled
));
5809 dhd_pcie_intr_count_dump(bus
->dhd
);
5810 dhd_print_tasklet_status(bus
->dhd
);
5811 dhd_prot_process_ctrlbuf(bus
->dhd
);
5812 timeleft
= dhd_os_d3ack_wait(bus
->dhd
, &bus
->wait_for_d3_ack
);
5813 /* Clear Interrupts */
5814 dhdpcie_bus_clear_intstatus(bus
);
5816 } /* bus->wait_for_d3_ack was 0 */
5817 #endif /* DHD_RECOVER_TIMEOUT */
5819 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
5820 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5822 /* To allow threads that got pre-empted to complete.
5824 while ((active
= dhd_os_check_wakelock_all(bus
->dhd
)) &&
5825 (idle_retry
< MAX_WKLK_IDLE_CHECK
)) {
5830 if (bus
->wait_for_d3_ack
) {
5831 DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__
));
5832 /* Got D3 Ack. Suspend the bus */
5834 DHD_ERROR(("%s():Suspend failed because of wakelock"
5835 "restoring Dongle to D0\n", __FUNCTION__
));
5837 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
5838 DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
5840 dhd_os_wd_timer(bus
->dhd
,
5841 bus
->dhd
->dhd_watchdog_ms_backup
);
5845 * Dongle still thinks that it has to be in D3 state until
5846 * it gets a D0 Inform, but we are backing off from suspend.
5847 * Ensure that Dongle is brought back to D0.
5849 * Bringing back Dongle from D3 Ack state to D0 state is a
5850 * 2 step process. Dongle would want to know that D0 Inform
5851 * would be sent as a MB interrupt to bring it out of D3 Ack
5852 * state to D0 state. So we have to send both this message.
5855 /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
5856 bus
->wait_for_d3_ack
= 0;
5858 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
5859 bus
->bus_low_power_state
= DHD_BUS_NO_LOW_POWER_STATE
;
5860 /* Enable back the intmask which was cleared in DPC
5861 * after getting D3_ACK.
5863 bus
->resume_intr_enable_count
++;
5865 /* For Linux, Macos etc (otherthan NDIS) enable back the dongle
5866 * interrupts using intmask and host interrupts
5867 * which were disabled in the dhdpcie_bus_isr()->
5868 * dhd_bus_handle_d3_ack().
5870 /* Enable back interrupt using Intmask!! */
5871 dhdpcie_bus_intr_enable(bus
);
5872 /* Enable back interrupt from Host side!! */
5873 dhdpcie_enable_irq(bus
);
5875 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
5877 if (bus
->use_d0_inform
) {
5878 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
5879 dhdpcie_send_mb_data(bus
,
5880 (H2D_HOST_D0_INFORM_IN_USE
| H2D_HOST_D0_INFORM
));
5881 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
5883 /* ring doorbell 1 (hostready) */
5884 dhd_bus_hostready(bus
);
5886 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5887 bus
->dhd
->busstate
= DHD_BUS_DATA
;
5888 /* resume all interface network queue. */
5889 dhd_bus_start_queue(bus
);
5890 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5893 /* Actual Suspend after no wakelock */
5894 /* At this time bus->bus_low_power_state will be
5895 * made to DHD_BUS_D3_ACK_RECIEVED after recieving D3_ACK
5896 * in dhd_bus_handle_d3_ack()
5898 if (bus
->use_d0_inform
&&
5899 (bus
->api
.fw_rev
< PCIE_SHARED_VERSION_6
)) {
5900 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
5901 dhdpcie_send_mb_data(bus
, (H2D_HOST_D0_INFORM_IN_USE
));
5902 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
5905 #if defined(BCMPCIE_OOB_HOST_WAKE)
5906 dhdpcie_oob_intr_set(bus
, TRUE
);
5907 #endif /* BCMPCIE_OOB_HOST_WAKE */
5909 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5910 /* The Host cannot process interrupts now so disable the same.
5911 * No need to disable the dongle INTR using intmask, as we are
5912 * already calling disabling INTRs from DPC context after
5913 * getting D3_ACK in dhd_bus_handle_d3_ack.
5914 * Code may not look symmetric between Suspend and
5915 * Resume paths but this is done to close down the timing window
5916 * between DPC and suspend context and bus->bus_low_power_state
5917 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
5919 bus
->dhd
->d3ackcnt_timeout
= 0;
5920 bus
->dhd
->busstate
= DHD_BUS_SUSPEND
;
5921 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5922 DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
5923 "BaseAddress1(0x%x)=0x%x\n", __FUNCTION__
,
5924 PCIECFGREG_BASEADDR0
,
5925 dhd_pcie_config_read(bus
->osh
,
5926 PCIECFGREG_BASEADDR0
, sizeof(uint32
)),
5927 PCIECFGREG_BASEADDR1
,
5928 dhd_pcie_config_read(bus
->osh
,
5929 PCIECFGREG_BASEADDR1
, sizeof(uint32
))));
5930 dhdpcie_dump_resource(bus
);
5931 /* Handle Host Suspend */
5932 rc
= dhdpcie_pci_suspend_resume(bus
, state
);
5934 bus
->last_suspend_end_time
= OSL_LOCALTIME_NS();
5937 } else if (timeleft
== 0) { /* D3 ACK Timeout */
5938 #ifdef DHD_FW_COREDUMP
5939 uint32 cur_memdump_mode
= bus
->dhd
->memdump_enabled
;
5940 #endif /* DHD_FW_COREDUMP */
5942 /* check if the D3 ACK timeout due to scheduling issue */
5943 bus
->dhd
->is_sched_error
= !dhd_query_bus_erros(bus
->dhd
) &&
5944 bus
->isr_entry_time
> bus
->last_d3_inform_time
&&
5945 dhd_bus_query_dpc_sched_errors(bus
->dhd
);
5946 bus
->dhd
->d3ack_timeout_occured
= TRUE
;
5947 /* If the D3 Ack has timeout */
5948 bus
->dhd
->d3ackcnt_timeout
++;
5949 DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
5950 __FUNCTION__
, bus
->dhd
->is_sched_error
?
5951 " due to scheduling problem" : "", bus
->dhd
->d3ackcnt_timeout
));
5952 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
5953 if (bus
->dhd
->is_sched_error
&& cur_memdump_mode
) {
5954 /* change g_assert_type to trigger Kernel panic */
5956 /* use ASSERT() to trigger panic */
5959 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
5960 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
5961 bus
->bus_low_power_state
= DHD_BUS_NO_LOW_POWER_STATE
;
5962 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
5963 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5964 bus
->dhd
->busstate
= DHD_BUS_DATA
;
5965 /* resume all interface network queue. */
5966 dhd_bus_start_queue(bus
);
5967 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5968 if (!bus
->dhd
->dongle_trap_occured
&&
5969 !bus
->is_linkdown
) {
5970 uint32 intstatus
= 0;
5972 /* Check if PCIe bus status is valid */
5973 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5974 bus
->pcie_mailbox_int
, 0, 0);
5975 if (intstatus
== (uint32
)-1) {
5976 /* Invalidate PCIe bus status */
5977 bus
->is_linkdown
= 1;
5980 dhd_bus_dump_console_buffer(bus
);
5981 dhd_prot_debug_info_print(bus
->dhd
);
5982 #ifdef DHD_FW_COREDUMP
5983 if (cur_memdump_mode
) {
5984 /* write core dump to file */
5985 bus
->dhd
->memdump_type
= DUMP_TYPE_D3_ACK_TIMEOUT
;
5986 dhdpcie_mem_dump(bus
);
5988 #endif /* DHD_FW_COREDUMP */
5990 DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
5992 #ifdef SUPPORT_LINKDOWN_RECOVERY
5993 #ifdef CONFIG_ARCH_MSM
5994 bus
->no_cfg_restore
= 1;
5995 #endif /* CONFIG_ARCH_MSM */
5996 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5997 dhd_os_check_hang(bus
->dhd
, 0, -ETIMEDOUT
);
5999 #if defined(DHD_ERPOM)
6000 dhd_schedule_reset(bus
->dhd
);
6006 DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__
));
6007 bus
->last_resume_start_time
= OSL_LOCALTIME_NS();
6010 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
6011 * si_backplane_access(function to read/write backplane)
6012 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
6013 * window being accessed is different form the window
6014 * being pointed by second_bar0win.
6015 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
6016 * invalidating second_bar0win after resume updates
6017 * PCIE2_BAR0_CORE2_WIN with right window.
6019 si_invalidate_second_bar0win(bus
->sih
);
6020 #if defined(BCMPCIE_OOB_HOST_WAKE)
6021 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus
->dhd
);
6022 #endif /* BCMPCIE_OOB_HOST_WAKE */
6023 rc
= dhdpcie_pci_suspend_resume(bus
, state
);
6024 DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, BaseAddress1(0x%x)=0x%x\n",
6025 __FUNCTION__
, PCIECFGREG_BASEADDR0
,
6026 dhd_pcie_config_read(bus
->osh
, PCIECFGREG_BASEADDR0
, sizeof(uint32
)),
6027 PCIECFGREG_BASEADDR1
,
6028 dhd_pcie_config_read(bus
->osh
, PCIECFGREG_BASEADDR1
, sizeof(uint32
))));
6029 dhdpcie_dump_resource(bus
);
6031 DHD_BUS_LOCK(bus
->bus_lock
, flags_bus
);
6032 /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
6033 bus
->bus_low_power_state
= DHD_BUS_NO_LOW_POWER_STATE
;
6034 DHD_BUS_UNLOCK(bus
->bus_lock
, flags_bus
);
6036 if (!rc
&& bus
->dhd
->busstate
== DHD_BUS_SUSPEND
) {
6037 if (bus
->use_d0_inform
) {
6038 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
6039 dhdpcie_send_mb_data(bus
, (H2D_HOST_D0_INFORM
));
6040 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
6042 /* ring doorbell 1 (hostready) */
6043 dhd_bus_hostready(bus
);
6045 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
6046 bus
->dhd
->busstate
= DHD_BUS_DATA
;
6047 #ifdef DHD_PCIE_RUNTIMEPM
6048 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus
->dhd
)) {
6051 wake_up_interruptible(&bus
->rpm_queue
);
6053 #endif /* DHD_PCIE_RUNTIMEPM */
6054 /* resume all interface network queue. */
6055 dhd_bus_start_queue(bus
);
6057 /* TODO: for NDIS also we need to use enable_irq in future */
6058 bus
->resume_intr_enable_count
++;
6060 /* For Linux, Macos etc (otherthan NDIS) enable back the dongle interrupts
6061 * using intmask and host interrupts
6062 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
6064 dhdpcie_bus_intr_enable(bus
); /* Enable back interrupt using Intmask!! */
6065 dhdpcie_enable_irq(bus
); /* Enable back interrupt from Host side!! */
6067 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
6069 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
6070 DHD_ERROR(("%s: Enabling wdtick after resume\n",
6072 dhd_os_wd_timer(bus
->dhd
, bus
->dhd
->dhd_watchdog_ms_backup
);
6075 bus
->last_resume_end_time
= OSL_LOCALTIME_NS();
6082 dhdpcie_force_alp(struct dhd_bus
*bus
, bool enable
)
6084 ASSERT(bus
&& bus
->sih
);
6086 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
6087 OFFSETOF(sbpcieregs_t
, u
.pcie2
.clk_ctl_st
), CCS_FORCEALP
, CCS_FORCEALP
);
6089 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
6090 OFFSETOF(sbpcieregs_t
, u
.pcie2
.clk_ctl_st
), CCS_FORCEALP
, 0);
6095 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
6097 dhdpcie_set_l1_entry_time(struct dhd_bus
*bus
, int l1_entry_time
)
6101 ASSERT(bus
&& bus
->sih
);
6103 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, OFFSETOF(sbpcieregs_t
, configaddr
), ~0,
6105 reg_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
6106 OFFSETOF(sbpcieregs_t
, configdata
), 0, 0);
6107 reg_val
= (reg_val
& ~(0x7f << 16)) | ((l1_entry_time
& 0x7f) << 16);
6108 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, OFFSETOF(sbpcieregs_t
, configdata
), ~0,
/*
 * dhd_apply_d11_war_length: chip-specific workaround (WAR) that adjusts the
 * requested d11 DMA-loopback transfer length before it is used.
 *
 * NOTE(review): this chunk was extracted with lines missing (the original
 * lines between the condition and the DHD_ERROR trace are absent), so the
 * full chip-id list, the condition tail, and the actual length adjustment
 * are not visible here. Only the surviving tokens are kept, byte-for-byte.
 * Confirm the complete body against upstream dhd_pcie.c before relying on
 * this text.
 */
6115 dhd_apply_d11_war_length(struct dhd_bus
*bus
, uint32 len
, uint32 d11_lpbk
)
/* Identify the chip so the WAR is applied only where it is needed */
6117 uint16 chipid
= si_chipid(bus
->sih
);
/* WAR applies on these chips, and only for non-M2M (d11) loopback modes */
6118 if ((chipid
== BCM4375_CHIP_ID
||
6119 chipid
== BCM4377_CHIP_ID
) &&
6120 (d11_lpbk
!= M2M_DMA_LPBK
&& d11_lpbk
!= M2M_NON_DMA_LPBK
) &&
/* Trace the (possibly adjusted) length that will be used */
6124 DHD_ERROR(("%s: len %d\n", __FUNCTION__
, len
));
6128 /** Transfers bytes from host to dongle and to host again using DMA */
6130 dhdpcie_bus_dmaxfer_req(struct dhd_bus
*bus
,
6131 uint32 len
, uint32 srcdelay
, uint32 destdelay
,
6132 uint32 d11_lpbk
, uint32 core_num
, uint32 wait
)
6136 if (bus
->dhd
== NULL
) {
6137 DHD_ERROR(("bus not inited\n"));
6140 if (bus
->dhd
->prot
== NULL
) {
6141 DHD_ERROR(("prot is not inited\n"));
6144 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
6145 DHD_ERROR(("not in a readystate to LPBK is not inited\n"));
6149 if (len
< 5 || len
> 4194296) {
6150 DHD_ERROR(("len is too small or too large\n"));
6154 len
= dhd_apply_d11_war_length(bus
, len
, d11_lpbk
);
6156 bus
->dmaxfer_complete
= FALSE
;
6157 ret
= dhdmsgbuf_dmaxfer_req(bus
->dhd
, len
, srcdelay
, destdelay
,
6158 d11_lpbk
, core_num
);
6159 if (ret
!= BCME_OK
|| !wait
)
6162 ret
= dhd_os_dmaxfer_wait(bus
->dhd
, &bus
->dmaxfer_complete
);
6164 ret
= BCME_NOTREADY
;
/*
 * dhdpcie_bus_download_state(bus, enter): move the dongle into
 * (enter=TRUE: halt ARM, reset RAM cores) or out of (enter=FALSE: write
 * NVRAM vars, seed randomness, populate the reset vector, release the ARM)
 * firmware-download state. Handles three ARM flavors: CA7 (sysmem),
 * no-CR4/legacy (socram + ARM7S/CM3), and CR4 (TCM).
 *
 * NOTE(review): this text was extracted with many original lines missing —
 * the 'if (enter)'/'else' arms, braces, 'goto fail' exits, and local
 * declarations (e.g. 'int bcmerror = 0;', 'uint32 zeros = 0;',
 * 'uint32 tmp;') are absent. The surviving tokens below are kept
 * byte-for-byte; consult upstream dhd_pcie.c for the complete control flow.
 */
6171 dhdpcie_bus_download_state(dhd_bus_t
*bus
, bool enter
)
6174 volatile uint32
*cr4_regs
;
/* Bail out early when the si handle was never attached */
6177 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__
));
6180 /* To enter download state, disable ARM and reset SOCRAM.
6181 * To exit download state, simply reset ARM (default is RAM boot).
6184 /* Make sure BAR1 maps to backplane address 0 */
6185 dhdpcie_bus_cfg_write_dword(bus
, PCI_BAR1_WIN
, 4, 0x00000000);
/* Restrict to ALP clock while the ARM is halted for download */
6186 bus
->alp_only
= TRUE
;
6188 /* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
6189 cr4_regs
= si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0);
/* No CR4, ARM7S, CM3 or CA7 core found -> unsupported chip */
6191 if (cr4_regs
== NULL
&& !(si_setcore(bus
->sih
, ARM7S_CORE_ID
, 0)) &&
6192 !(si_setcore(bus
->sih
, ARMCM3_CORE_ID
, 0)) &&
6193 !(si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0))) {
6194 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__
));
6195 bcmerror
= BCME_ERROR
;
/* --- CA7 chips: firmware lives in the SYSMEM core --- */
6199 if (si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0)) {
6200 /* Halt ARM & remove reset */
6201 si_core_reset(bus
->sih
, SICF_CPUHALT
, SICF_CPUHALT
);
6202 if (!(si_setcore(bus
->sih
, SYSMEM_CORE_ID
, 0))) {
6203 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__
));
6204 bcmerror
= BCME_ERROR
;
6207 si_core_reset(bus
->sih
, 0, 0);
6208 /* reset last 4 bytes of RAM address. to be used for shared area */
6209 dhdpcie_init_shared_addr(bus
);
/* --- Legacy chips without CR4: disable ARM, reset SOCRAM --- */
6210 } else if (cr4_regs
== NULL
) { /* no CR4 present on chip */
6211 si_core_disable(bus
->sih
, 0);
6213 if (!(si_setcore(bus
->sih
, SOCRAM_CORE_ID
, 0))) {
6214 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__
));
6215 bcmerror
= BCME_ERROR
;
6219 si_core_reset(bus
->sih
, 0, 0);
6221 /* Clear the top bit of memory */
6224 if (dhdpcie_bus_membytes(bus
, TRUE
, bus
->ramsize
- 4,
6225 (uint8
*)&zeros
, 4) < 0) {
6226 bcmerror
= BCME_ERROR
;
/* --- CR4 chips: firmware goes into TCM; ARM stays halted --- */
6234 * Read RAM base address [0x18_0000]
6235 * [next] Download firmware
6236 * [done at else] Populate the reset vector
6237 * [done at else] Remove ARM halt
6239 /* Halt ARM & remove reset */
6240 si_core_reset(bus
->sih
, SICF_CPUHALT
, SICF_CPUHALT
);
/* 43602: pre-clear two PDA banks before download */
6241 if (BCM43602_CHIP(bus
->sih
->chip
)) {
6242 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKIDX
, 5);
6243 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKPDA
, 0);
6244 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKIDX
, 7);
6245 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKPDA
, 0);
6247 /* reset last 4 bytes of RAM address. to be used for shared area */
6248 dhdpcie_init_shared_addr(bus
);
/* ===== exit-download path: write vars, seed RNG, set reset vector ===== */
6251 if (si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0)) {
6253 if ((bcmerror
= dhdpcie_bus_write_vars(bus
))) {
6254 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__
));
6257 /* write random numbers to sysmem for the purpose of
6258 * randomizing heap address space.
6260 if ((bcmerror
= dhdpcie_wrt_rnd(bus
)) != BCME_OK
) {
6261 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6265 /* switch back to arm core again */
6266 if (!(si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0))) {
6267 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__
));
6268 bcmerror
= BCME_ERROR
;
6271 /* write address 0 with reset instruction */
6272 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, 0,
6273 (uint8
*)&bus
->resetinstr
, sizeof(bus
->resetinstr
));
6274 /* now remove reset and halt and continue to run CA7 */
6275 } else if (!si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0)) {
6276 if (!(si_setcore(bus
->sih
, SOCRAM_CORE_ID
, 0))) {
6277 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__
));
6278 bcmerror
= BCME_ERROR
;
6282 if (!si_iscoreup(bus
->sih
)) {
6283 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__
));
6284 bcmerror
= BCME_ERROR
;
6288 /* Enable remap before ARM reset but after vars.
6289 * No backplane access in remap mode
6291 if (!si_setcore(bus
->sih
, PCMCIA_CORE_ID
, 0) &&
6292 !si_setcore(bus
->sih
, SDIOD_CORE_ID
, 0)) {
6293 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__
));
6294 bcmerror
= BCME_ERROR
;
6298 if (!(si_setcore(bus
->sih
, ARM7S_CORE_ID
, 0)) &&
6299 !(si_setcore(bus
->sih
, ARMCM3_CORE_ID
, 0))) {
6300 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__
));
6301 bcmerror
= BCME_ERROR
;
/* --- CR4 exit path --- */
6305 if (BCM43602_CHIP(bus
->sih
->chip
)) {
6306 /* Firmware crashes on SOCSRAM access when core is in reset */
6307 if (!(si_setcore(bus
->sih
, SOCRAM_CORE_ID
, 0))) {
6308 DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
6310 bcmerror
= BCME_ERROR
;
6313 si_core_reset(bus
->sih
, 0, 0);
6314 si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0);
6318 if ((bcmerror
= dhdpcie_bus_write_vars(bus
))) {
6319 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__
));
6323 /* write a random number to TCM for the purpose of
6324 * randomizing heap address space.
6326 if ((bcmerror
= dhdpcie_wrt_rnd(bus
)) != BCME_OK
) {
6327 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
6332 if ((bcmerror
= dhdpcie_wrt_host_whitelist_region(bus
)) != BCME_OK
) {
6333 DHD_ERROR(("%s: Failed to write Whitelist region to TCM !\n",
6337 /* switch back to arm core again */
6338 if (!(si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0))) {
6339 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__
));
6340 bcmerror
= BCME_ERROR
;
6344 /* write address 0 with reset instruction */
6345 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, 0,
6346 (uint8
*)&bus
->resetinstr
, sizeof(bus
->resetinstr
));
/* Read back address 0 to verify the reset vector actually landed */
6348 if (bcmerror
== BCME_OK
) {
6351 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, 0,
6352 (uint8
*)&tmp
, sizeof(tmp
));
6354 if (bcmerror
== BCME_OK
&& tmp
!= bus
->resetinstr
) {
6355 DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
6356 __FUNCTION__
, bus
->resetinstr
));
6357 DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
6358 __FUNCTION__
, tmp
));
6359 bcmerror
= BCME_ERROR
;
6364 /* now remove reset and halt and continue to run CR4 */
6367 si_core_reset(bus
->sih
, 0, 0);
6369 /* Allow HT Clock now that the ARM is running. */
6370 bus
->alp_only
= FALSE
;
/* Firmware image is in place: advance the bus state machine */
6372 bus
->dhd
->busstate
= DHD_BUS_LOAD
;
6376 /* Always return to PCIE core */
6377 si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
6380 } /* dhdpcie_bus_download_state */
/*
 * dhdpcie_bus_write_vars: copy the NVRAM variable block (bus->vars) to the
 * top of dongle RAM and append the "length token" (word count + inverted
 * checksum) in the final RAM word, where dongle firmware expects it.
 * Under DHD_DEBUG the downloaded image is read back and memcmp-verified.
 *
 * NOTE(review): extracted with lines missing (function braces, several
 * local declarations such as 'int bcmerror = 0;'/'uint32 varaddr;'/
 * 'uint32 varsizew;', the 'if (bus->vars)' guard, and some error-path
 * statements). Surviving tokens are kept byte-for-byte; see upstream
 * dhd_pcie.c for the complete body.
 */
6383 dhdpcie_bus_write_vars(dhd_bus_t
*bus
)
6386 uint32 varsize
, phys_size
;
6391 uint8
*nvram_ularray
;
6392 #endif /* DHD_DEBUG */
6394 /* Even if there are no vars are to be written, we still need to set the ramsize. */
6395 varsize
= bus
->varsz
? ROUNDUP(bus
->varsz
, 4) : 0;
/* Vars sit just below the last RAM word (reserved for the length token) */
6396 varaddr
= (bus
->ramsize
- 4) - varsize
;
6398 varaddr
+= bus
->dongle_ram_base
;
/* Stage the vars in a zero-padded bounce buffer before the DMA write */
6402 vbuffer
= (uint8
*)MALLOC(bus
->dhd
->osh
, varsize
);
6406 bzero(vbuffer
, varsize
);
6407 bcopy(bus
->vars
, vbuffer
, bus
->varsz
);
6408 /* Write the vars list */
6409 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, varaddr
, vbuffer
, varsize
);
6411 /* Implement read back and verify later */
6413 /* Verify NVRAM bytes */
6414 DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize
));
6415 nvram_ularray
= (uint8
*)MALLOC(bus
->dhd
->osh
, varsize
);
6416 if (!nvram_ularray
) {
6417 MFREE(bus
->dhd
->osh
, vbuffer
, varsize
);
6421 /* Upload image to verify downloaded contents. */
6422 memset(nvram_ularray
, 0xaa, varsize
);
6424 /* Read the vars list to temp buffer for comparison */
6425 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, varaddr
, nvram_ularray
, varsize
);
6427 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
6428 __FUNCTION__
, bcmerror
, varsize
, varaddr
));
6431 /* Compare the org NVRAM with the one read from RAM */
6432 if (memcmp(vbuffer
, nvram_ularray
, varsize
)) {
6433 DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__
));
6435 DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
6438 MFREE(bus
->dhd
->osh
, nvram_ularray
, varsize
);
6439 #endif /* DHD_DEBUG */
6441 MFREE(bus
->dhd
->osh
, vbuffer
, varsize
);
/* Compute the physical end of RAM the token is written at */
6444 phys_size
= REMAP_ENAB(bus
) ? bus
->ramsize
: bus
->orig_ramsize
;
6446 phys_size
+= bus
->dongle_ram_base
;
6448 /* adjust to the user specified RAM */
6449 DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
6450 phys_size
, bus
->ramsize
));
6451 DHD_INFO(("Vars are at %d, orig varsize is %d\n",
6453 varsize
= ((phys_size
- 4) - varaddr
);
6456 * Determine the length token:
6457 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
/* Zero-vars case: token (and cached nvram_csm) stays zero */
6461 bus
->nvram_csm
= varsizew
;
6463 varsizew
= varsize
/ 4;
6464 varsizew
= (~varsizew
<< 16) | (varsizew
& 0x0000FFFF);
/* Cache the token for later shared-area validation */
6465 bus
->nvram_csm
= varsizew
;
6466 varsizew
= htol32(varsizew
);
6469 DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize
, varsizew
));
6471 /* Write the length token to the last word */
6472 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, (phys_size
- 4),
6473 (uint8
*)&varsizew
, 4);
6476 } /* dhdpcie_bus_write_vars */
/*
 * dhdpcie_downloadvars: replace the driver's cached NVRAM variable block
 * with the caller-supplied buffer (arg/len), then optionally parse
 * ccode/regrev out of it (DHD_USE_SINGLE_NVRAM_FILE mfg-mode validation,
 * and KEEP_KR_REGREV/KEEP_JP_REGREV caching into bus->dhd->vars_*).
 *
 * NOTE(review): extracted with lines missing (function braces, the
 * "bus must be down" and length sanity guards, several locals such as
 * 'char *sp, *ep;'/'char *pos;'/'uint tmpidx;', and some loop-exit
 * statements). Surviving tokens are kept byte-for-byte; see upstream
 * dhd_pcie.c for the complete body.
 */
6479 dhdpcie_downloadvars(dhd_bus_t
*bus
, void *arg
, int len
)
6481 int bcmerror
= BCME_OK
;
6482 #if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
6485 #endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */
6487 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
6489 /* Basic sanity checks */
/* Vars may only change while the dongle is down */
6491 bcmerror
= BCME_NOTDOWN
;
6495 bcmerror
= BCME_BUFTOOSHORT
;
6499 /* Free the old ones and replace with passed variables */
6501 MFREE(bus
->dhd
->osh
, bus
->vars
, bus
->varsz
);
6503 bus
->vars
= MALLOC(bus
->dhd
->osh
, len
);
/* varsz tracks the allocation: 0 when MALLOC failed */
6504 bus
->varsz
= bus
->vars
? len
: 0;
6505 if (bus
->vars
== NULL
) {
6506 bcmerror
= BCME_NOMEM
;
6510 /* Copy the passed variables, which should include the terminating double-null */
6511 bcopy(arg
, bus
->vars
, bus
->varsz
);
6513 #ifdef DHD_USE_SINGLE_NVRAM_FILE
/* Mfg-mode only: validate that ccode= and regrev= entries are present */
6514 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
6518 char tag
[2][8] = {"ccode=", "regrev="};
6520 /* Find ccode and regrev info */
6521 for (i
= 0; i
< 2; i
++) {
6522 sp
= strnstr(bus
->vars
, tag
[i
], bus
->varsz
);
6524 DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
6525 __FUNCTION__
, bus
->nv_path
));
6526 bcmerror
= BCME_ERROR
;
6529 sp
= strchr(sp
, '=');
6530 ep
= strchr(sp
, '\0');
6531 /* We assumed that string length of both ccode and
6532 * regrev values should not exceed WLC_CNTRY_BUF_SZ
6534 if (ep
&& ((ep
- sp
) <= WLC_CNTRY_BUF_SZ
)) {
6536 while (*sp
!= '\0') {
6537 DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
6538 __FUNCTION__
, tag
[i
], *sp
));
6542 DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
6543 __FUNCTION__
, tag
[i
]));
6544 bcmerror
= BCME_ERROR
;
6549 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
6551 #if defined(KEEP_KR_REGREV) || defined(KEEP_JP_REGREV)
6552 #ifdef DHD_USE_SINGLE_NVRAM_FILE
6553 if (dhd_bus_get_fw_mode(bus
->dhd
) != DHD_FLAG_MFG_MODE
)
6554 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
/* Work on a NUL->newline converted copy so strstr/sscanf can scan it */
6557 tmpbuf
= MALLOCZ(bus
->dhd
->osh
, bus
->varsz
+ 1);
6558 if (tmpbuf
== NULL
) {
6561 memcpy(tmpbuf
, bus
->vars
, bus
->varsz
);
6562 for (tmpidx
= 0; tmpidx
< bus
->varsz
; tmpidx
++) {
6563 if (tmpbuf
[tmpidx
] == 0) {
6564 tmpbuf
[tmpidx
] = '\n';
6567 bus
->dhd
->vars_ccode
[0] = 0;
6568 bus
->dhd
->vars_regrev
= 0;
6569 if ((pos
= strstr(tmpbuf
, "ccode"))) {
6570 sscanf(pos
, "ccode=%s\n", bus
->dhd
->vars_ccode
);
6572 if ((pos
= strstr(tmpbuf
, "regrev"))) {
6573 sscanf(pos
, "regrev=%u\n", &(bus
->dhd
->vars_regrev
));
6575 MFREE(bus
->dhd
->osh
, tmpbuf
, bus
->varsz
+ 1);
6577 #endif /* KEEP_KR_REGREV || KEEP_JP_REGREV */
6583 /* loop through the capability list and see if the pcie capabilty exists */
6585 dhdpcie_find_pci_capability(osl_t
*osh
, uint8 req_cap_id
)
6591 /* check for Header type 0 */
6592 byte_val
= read_pci_cfg_byte(PCI_CFG_HDR
);
6593 if ((byte_val
& 0x7f) != PCI_HEADER_NORMAL
) {
6594 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__
));
6598 /* check if the capability pointer field exists */
6599 byte_val
= read_pci_cfg_byte(PCI_CFG_STAT
);
6600 if (!(byte_val
& PCI_CAPPTR_PRESENT
)) {
6601 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__
));
6605 cap_ptr
= read_pci_cfg_byte(PCI_CFG_CAPPTR
);
6606 /* check if the capability pointer is 0x00 */
6607 if (cap_ptr
== 0x00) {
6608 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__
));
6612 /* loop thr'u the capability list and see if the pcie capabilty exists */
6614 cap_id
= read_pci_cfg_byte(cap_ptr
);
6616 while (cap_id
!= req_cap_id
) {
6617 cap_ptr
= read_pci_cfg_byte((cap_ptr
+ 1));
6618 if (cap_ptr
== 0x00) break;
6619 cap_id
= read_pci_cfg_byte(cap_ptr
);
6627 dhdpcie_pme_active(osl_t
*osh
, bool enable
)
6632 cap_ptr
= dhdpcie_find_pci_capability(osh
, PCI_CAP_POWERMGMTCAP_ID
);
6635 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__
));
6639 pme_csr
= OSL_PCI_READ_CONFIG(osh
, cap_ptr
+ PME_CSR_OFFSET
, sizeof(uint32
));
6640 DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__
, pme_csr
));
6642 pme_csr
|= PME_CSR_PME_STAT
;
6644 pme_csr
|= PME_CSR_PME_EN
;
6646 pme_csr
&= ~PME_CSR_PME_EN
;
6649 OSL_PCI_WRITE_CONFIG(osh
, cap_ptr
+ PME_CSR_OFFSET
, sizeof(uint32
), pme_csr
);
6653 dhdpcie_pme_cap(osl_t
*osh
)
6658 cap_ptr
= dhdpcie_find_pci_capability(osh
, PCI_CAP_POWERMGMTCAP_ID
);
6661 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__
));
6665 pme_cap
= OSL_PCI_READ_CONFIG(osh
, cap_ptr
, sizeof(uint32
));
6667 DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__
, pme_cap
));
6669 return ((pme_cap
& PME_CAP_PM_STATES
) != 0);
6673 dhdpcie_lcreg(osl_t
*osh
, uint32 mask
, uint32 val
)
6677 uint8 lcreg_offset
; /* PCIE capability LCreg offset in the config space */
6680 pcie_cap
= dhdpcie_find_pci_capability(osh
, PCI_CAP_PCIECAP_ID
);
6683 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__
));
6687 lcreg_offset
= pcie_cap
+ PCIE_CAP_LINKCTRL_OFFSET
;
6692 reg_val
= OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
6696 reg_val
|= (mask
& val
);
6699 OSL_PCI_WRITE_CONFIG(osh
, lcreg_offset
, sizeof(uint32
), reg_val
);
6701 return OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
6705 dhdpcie_clkreq(osl_t
*osh
, uint32 mask
, uint32 val
)
6709 uint8 lcreg_offset
; /* PCIE capability LCreg offset in the config space */
6711 pcie_cap
= dhdpcie_find_pci_capability(osh
, PCI_CAP_PCIECAP_ID
);
6714 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__
));
6718 lcreg_offset
= pcie_cap
+ PCIE_CAP_LINKCTRL_OFFSET
;
6720 reg_val
= OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
6724 reg_val
|= PCIE_CLKREQ_ENAB
;
6726 reg_val
&= ~PCIE_CLKREQ_ENAB
;
6727 OSL_PCI_WRITE_CONFIG(osh
, lcreg_offset
, sizeof(uint32
), reg_val
);
6728 reg_val
= OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
6730 if (reg_val
& PCIE_CLKREQ_ENAB
)
6736 void dhd_dump_intr_counters(dhd_pub_t
*dhd
, struct bcmstrbuf
*strbuf
)
6739 uint64 current_time
= OSL_LOCALTIME_NS();
6742 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
6748 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
6752 bcm_bprintf(strbuf
, "\n ------- DUMPING INTR enable/disable counters-------\n");
6753 bcm_bprintf(strbuf
, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
6754 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
6755 "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
6756 bus
->resume_intr_enable_count
, bus
->dpc_intr_enable_count
,
6757 bus
->isr_intr_disable_count
, bus
->suspend_intr_disable_count
,
6758 bus
->dpc_return_busdown_count
, bus
->non_ours_irq_count
);
6759 #ifdef BCMPCIE_OOB_HOST_WAKE
6760 bcm_bprintf(strbuf
, "oob_intr_count=%lu oob_intr_enable_count=%lu"
6761 " oob_intr_disable_count=%lu\n oob_irq_num=%d last_oob_irq_time=%llu\n",
6762 bus
->oob_intr_count
, bus
->oob_intr_enable_count
,
6763 bus
->oob_intr_disable_count
, dhdpcie_get_oob_irq_num(dhd
->bus
),
6764 bus
->last_oob_irq_time
);
6765 #endif /* BCMPCIE_OOB_HOST_WAKE */
6766 bcm_bprintf(strbuf
, "\ncurrent_time="SEC_USEC_FMT
" isr_entry_time="SEC_USEC_FMT
6767 " isr_exit_time="SEC_USEC_FMT
"\ndpc_sched_time="SEC_USEC_FMT
6768 " last_non_ours_irq_time="SEC_USEC_FMT
" dpc_entry_time="SEC_USEC_FMT
"\n"
6769 "last_process_ctrlbuf_time="SEC_USEC_FMT
" last_process_flowring_time="SEC_USEC_FMT
6770 " last_process_txcpl_time="SEC_USEC_FMT
"\nlast_process_rxcpl_time="SEC_USEC_FMT
6771 " last_process_infocpl_time="SEC_USEC_FMT
6772 "\ndpc_exit_time="SEC_USEC_FMT
" resched_dpc_time="SEC_USEC_FMT
"\n"
6773 "last_d3_inform_time="SEC_USEC_FMT
"\n",
6774 GET_SEC_USEC(current_time
), GET_SEC_USEC(bus
->isr_entry_time
),
6775 GET_SEC_USEC(bus
->isr_exit_time
), GET_SEC_USEC(bus
->dpc_entry_time
),
6776 GET_SEC_USEC(bus
->dpc_sched_time
), GET_SEC_USEC(dhd
->bus
->last_non_ours_irq_time
),
6777 GET_SEC_USEC(bus
->last_process_ctrlbuf_time
),
6778 GET_SEC_USEC(bus
->last_process_flowring_time
),
6779 GET_SEC_USEC(bus
->last_process_txcpl_time
),
6780 GET_SEC_USEC(bus
->last_process_rxcpl_time
),
6781 GET_SEC_USEC(bus
->last_process_infocpl_time
),
6782 GET_SEC_USEC(bus
->dpc_exit_time
), GET_SEC_USEC(bus
->resched_dpc_time
),
6783 GET_SEC_USEC(bus
->last_d3_inform_time
));
6785 bcm_bprintf(strbuf
, "\nlast_suspend_start_time="SEC_USEC_FMT
" last_suspend_end_time="
6786 SEC_USEC_FMT
" last_resume_start_time="SEC_USEC_FMT
" last_resume_end_time="
6787 SEC_USEC_FMT
"\n", GET_SEC_USEC(bus
->last_suspend_start_time
),
6788 GET_SEC_USEC(dhd
->bus
->last_suspend_end_time
),
6789 GET_SEC_USEC(dhd
->bus
->last_resume_start_time
),
6790 GET_SEC_USEC(dhd
->bus
->last_resume_end_time
));
6793 void dhd_dump_intr_registers(dhd_pub_t
*dhd
, struct bcmstrbuf
*strbuf
)
6795 uint32 intstatus
= 0;
6798 uint32 d2h_mb_data
= 0;
6800 intstatus
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
6801 dhd
->bus
->pcie_mailbox_int
, 0, 0);
6802 intmask
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
6803 dhd
->bus
->pcie_mailbox_mask
, 0, 0);
6804 d2h_db0
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, PCID2H_MailBox
, 0, 0);
6805 dhd_bus_cmn_readshared(dhd
->bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
6807 bcm_bprintf(strbuf
, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
6808 intstatus
, intmask
, d2h_db0
);
6809 bcm_bprintf(strbuf
, "d2h_mb_data=0x%x def_intmask=0x%x\n",
6810 d2h_mb_data
, dhd
->bus
->def_intmask
);
6812 /** Add bus dump output to a buffer */
6813 void dhd_bus_dump(dhd_pub_t
*dhdp
, struct bcmstrbuf
*strbuf
)
6817 flow_ring_node_t
*flow_ring_node
;
6818 flow_info_t
*flow_info
;
6819 #ifdef TX_STATUS_LATENCY_STATS
6821 if_flow_lkup_t
*if_flow_lkup
;
6822 dhd_if_tx_status_latency_t if_tx_status_latency
[DHD_MAX_IFS
];
6823 #endif /* TX_STATUS_LATENCY_STATS */
6825 if (dhdp
->busstate
!= DHD_BUS_DATA
)
6828 #ifdef TX_STATUS_LATENCY_STATS
6829 memset(if_tx_status_latency
, 0, sizeof(if_tx_status_latency
));
6830 #endif /* TX_STATUS_LATENCY_STATS */
6831 #ifdef DHD_WAKE_STATUS
6832 bcm_bprintf(strbuf
, "wake %u rxwake %u readctrlwake %u\n",
6833 bcmpcie_get_total_wake(dhdp
->bus
), dhdp
->bus
->wake_counts
.rxwake
,
6834 dhdp
->bus
->wake_counts
.rcwake
);
6835 #ifdef DHD_WAKE_RX_STATUS
6836 bcm_bprintf(strbuf
, " unicast %u muticast %u broadcast %u arp %u\n",
6837 dhdp
->bus
->wake_counts
.rx_ucast
, dhdp
->bus
->wake_counts
.rx_mcast
,
6838 dhdp
->bus
->wake_counts
.rx_bcast
, dhdp
->bus
->wake_counts
.rx_arp
);
6839 bcm_bprintf(strbuf
, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
6840 dhdp
->bus
->wake_counts
.rx_multi_ipv4
, dhdp
->bus
->wake_counts
.rx_multi_ipv6
,
6841 dhdp
->bus
->wake_counts
.rx_icmpv6
, dhdp
->bus
->wake_counts
.rx_multi_other
);
6842 bcm_bprintf(strbuf
, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
6843 dhdp
->bus
->wake_counts
.rx_icmpv6_ra
, dhdp
->bus
->wake_counts
.rx_icmpv6_na
,
6844 dhdp
->bus
->wake_counts
.rx_icmpv6_ns
);
6845 #endif /* DHD_WAKE_RX_STATUS */
6846 #ifdef DHD_WAKE_EVENT_STATUS
6847 for (flowid
= 0; flowid
< WLC_E_LAST
; flowid
++)
6848 if (dhdp
->bus
->wake_counts
.rc_event
[flowid
] != 0)
6849 bcm_bprintf(strbuf
, " %s = %u\n", bcmevent_get_name(flowid
),
6850 dhdp
->bus
->wake_counts
.rc_event
[flowid
]);
6851 bcm_bprintf(strbuf
, "\n");
6852 #endif /* DHD_WAKE_EVENT_STATUS */
6853 #endif /* DHD_WAKE_STATUS */
6855 dhd_prot_print_info(dhdp
, strbuf
);
6856 dhd_dump_intr_registers(dhdp
, strbuf
);
6857 dhd_dump_intr_counters(dhdp
, strbuf
);
6858 bcm_bprintf(strbuf
, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
6859 dhdp
->bus
->h2d_mb_data_ptr_addr
, dhdp
->bus
->d2h_mb_data_ptr_addr
);
6860 bcm_bprintf(strbuf
, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp
->cumm_ctr
));
6862 "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
6863 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
6864 "Overflows", "RD", "WR");
6866 #ifdef TX_STATUS_LATENCY_STATS
6867 /* Average Tx status/Completion Latency in micro secs */
6868 bcm_bprintf(strbuf
, "%12s", "AvgTxCmpL_Us ");
6869 #endif /* TX_STATUS_LATENCY_STATS */
6871 bcm_bprintf(strbuf
, "%5s %6s %5s \n", "Acked", "tossed", "noack");
6873 for (flowid
= 0; flowid
< dhdp
->num_flow_rings
; flowid
++) {
6874 flow_ring_node
= DHD_FLOW_RING(dhdp
, flowid
);
6875 if (!flow_ring_node
->active
)
6878 flow_info
= &flow_ring_node
->flow_info
;
6880 "%3d. %4d %2d %4d "MACDBG
" %4d %4d %6d %10u ", ix
++,
6881 flow_ring_node
->flowid
, flow_info
->ifindex
, flow_info
->tid
,
6882 MAC2STRDBG(flow_info
->da
),
6883 DHD_FLOW_QUEUE_LEN(&flow_ring_node
->queue
),
6884 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node
->queue
)),
6885 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node
->queue
)),
6886 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node
->queue
));
6887 dhd_prot_print_flow_ring(dhdp
, flow_ring_node
->prot_info
, strbuf
,
6890 #ifdef TX_STATUS_LATENCY_STATS
6891 bcm_bprintf(strbuf
, "%12d ",
6892 flow_info
->num_tx_status
?
6893 DIV_U64_BY_U64(flow_info
->cum_tx_status_latency
,
6894 flow_info
->num_tx_status
) : 0);
6896 ifindex
= flow_info
->ifindex
;
6897 ASSERT(ifindex
< DHD_MAX_IFS
);
6898 if (ifindex
< DHD_MAX_IFS
) {
6899 if_tx_status_latency
[ifindex
].num_tx_status
+= flow_info
->num_tx_status
;
6900 if_tx_status_latency
[ifindex
].cum_tx_status_latency
+=
6901 flow_info
->cum_tx_status_latency
;
6903 DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
6904 __FUNCTION__
, ifindex
, flowid
));
6906 #endif /* TX_STATUS_LATENCY_STATS */
6908 "%5s %6s %5s\n", "NA", "NA", "NA");
6911 #ifdef TX_STATUS_LATENCY_STATS
6912 bcm_bprintf(strbuf
, "%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStats");
6913 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
6914 for (ix
= 0; ix
< DHD_MAX_IFS
; ix
++) {
6915 if (!if_flow_lkup
[ix
].status
) {
6918 bcm_bprintf(strbuf
, "%2d %16d %16d\n",
6920 if_tx_status_latency
[ix
].num_tx_status
?
6921 DIV_U64_BY_U64(if_tx_status_latency
[ix
].cum_tx_status_latency
,
6922 if_tx_status_latency
[ix
].num_tx_status
): 0,
6923 if_tx_status_latency
[ix
].num_tx_status
);
6925 #endif /* TX_STATUS_LATENCY_STATS */
6926 bcm_bprintf(strbuf
, "D3 inform cnt %d\n", dhdp
->bus
->d3_inform_cnt
);
6927 bcm_bprintf(strbuf
, "D0 inform cnt %d\n", dhdp
->bus
->d0_inform_cnt
);
6928 bcm_bprintf(strbuf
, "D0 inform in use cnt %d\n", dhdp
->bus
->d0_inform_in_use_cnt
);
6929 if (dhdp
->d2h_hostrdy_supported
) {
6930 bcm_bprintf(strbuf
, "hostready count:%d\n", dhdp
->bus
->hostready_count
);
6932 bcm_bprintf(strbuf
, "d2h_intr_method -> %s\n",
6933 dhdp
->bus
->d2h_intr_method
? "PCIE_MSI" : "PCIE_INTX");
/**
 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
 * flow queue to their flow ring.
 *
 * NOTE(review): this chunk arrived with extraction-dropped lines; structural lines (return type,
 * braces, `dll_t` declaration, loop-advance, `break`) were restored from context — verify
 * against a pristine copy of dhd_pcie.c before relying on them.
 */
void
dhd_update_txflowrings(dhd_pub_t *dhd)
{
	unsigned long flags;
	dll_t *item, *next;	/* NOTE(review): declaration dropped in extraction; inferred from usage */
	flow_ring_node_t *flow_ring_node;
	struct dhd_bus *bus = dhd->bus;

	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	/* Walk the active flowring list; bail out early once the device is gone,
	 * since the dongle can no longer consume queued work.
	 */
	for (item = dll_head_p(&bus->flowring_active_list);
		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
		item = next) {
		/* Don't keep feeding the dongle after a hang has been reported */
		if (dhd->hang_was_sent) {
			break;
		}

		/* Capture the successor first: the node may be unlinked while being processed */
		next = dll_next_p(item);
		flow_ring_node = dhd_constlist_to_flowring(item);

		/* Ensure that flow_ring_node in the list is Not Null */
		ASSERT(flow_ring_node != NULL);

		/* Ensure that the flowring node has valid contents */
		ASSERT(flow_ring_node->prot_info != NULL);

		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
	}
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
}
/** Mailbox ringbell Function */
/*
 * Generates a host-to-dongle mailbox interrupt, either via PCIe core doorbell 1
 * (when db1_for_mb is set) or via the PCISBMbx config-space register.
 *
 * NOTE(review): extraction dropped lines here (return type, braces, si_corereg value
 * arguments); restored from context — verify against pristine dhd_pcie.c.
 */
static void
dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
{
	/* Old PCIe core revisions (2/4/6) have no mailbox path at all */
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		DHD_ERROR(("mailbox communication not supported\n"));
		return;
	}
	if (bus->db1_for_mb) {
		/* this is a pcie core register, not the config register */
		DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
		if (DAR_PWRREQ(bus)) {
			/* Assert a power request before touching DAR doorbell registers */
			dhd_bus_pcie_pwr_req(bus);
		}
		si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
			~0, 0x12345678);	/* NOTE(review): value args dropped in extraction — confirm */
	} else {
		DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
		/* NOTE(review): the duplicate write below is present in the original source;
		 * presumably a deliberate hardware workaround — confirm before removing.
		 */
		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
	}
}
/* Upon receiving a mailbox interrupt,
 * if H2D_FW_TRAP bit is set in mailbox location
 * device traps
 */
/*
 * Forces the dongle firmware to trap, for debugging / recovery.
 * NOTE(review): return type and braces dropped in extraction; restored from context.
 */
static void
dhdpcie_fw_trap(dhd_bus_t *bus)
{
	/* Send the mailbox data and generate mailbox intr. */
	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
	/* For FWs that cannot interprete H2D_FW_TRAP */
	(void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
}
/** mailbox doorbell ring function */
/*
 * Rings the host->dongle doorbell. Old PCIe core revs (2/4/6) use the PCIE_INTB
 * mailbox-interrupt register; newer cores use doorbell 0 (channel 2 when IDMA is active).
 * Skipped entirely after D3_INFORM has been sent or when the PCIe link is down.
 *
 * NOTE(review): extraction dropped lines (return type, braces, `return` statements,
 * si_corereg value arguments); restored from context — verify against pristine source.
 */
void
dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
{
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
			PCIE_INTB, PCIE_INTB);
	} else {
		/* this is a pcie core register, not the config regsiter */
		DHD_INFO(("writing a door bell to the device\n"));
		if (IDMA_ACTIVE(bus->dhd)) {
			if (DAR_PWRREQ(bus)) {
				dhd_bus_pcie_pwr_req(bus);
			}
			si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
				~0, value);	/* NOTE(review): value args dropped in extraction — confirm */
		} else {
			if (DAR_PWRREQ(bus)) {
				dhd_bus_pcie_pwr_req(bus);
			}
			si_corereg(bus->sih, bus->sih->buscoreidx,
				dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
		}
	}
}
/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
/*
 * Rings doorbell 0 on DMA channel 2, used by IDMA/IFRM. Same D3/linkdown guards
 * as dhd_bus_ringbell(). `devwake` is accepted for interface parity; the visible
 * body does not read it.
 *
 * NOTE(review): extraction dropped lines (return type, braces, `return` statements,
 * final si_corereg arguments); restored from context — verify against pristine source.
 */
void
dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
{
	/* this is a pcie core register, not the config regsiter */
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_INFO(("writing a door bell 2 to the device\n"));
	if (DAR_PWRREQ(bus)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
		~0, value);	/* NOTE(review): value args dropped in extraction — confirm */
}
/*
 * Fast doorbell ring: writes `value` directly to the memory-mapped doorbell
 * register (bus->pcie_mb_intr_addr) instead of going through si_corereg().
 * Same D3/linkdown guards as the slow path.
 *
 * NOTE(review): return type, braces and `return` statements dropped in extraction;
 * restored from context — verify against pristine dhd_pcie.c.
 */
void
dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
{
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	if (DAR_PWRREQ(bus)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
}
/*
 * Fast doorbell-2 ring (IDMA/IFRM channel): writes `value` directly to the
 * memory-mapped second doorbell register (bus->pcie_mb_intr_2_addr).
 * `devwake` is accepted for interface parity; the visible body does not read it.
 *
 * NOTE(review): return type, braces and `return` statements dropped in extraction;
 * restored from context — verify against pristine dhd_pcie.c.
 */
void
dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
{
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	if (DAR_PWRREQ(bus)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
}
/*
 * Doorbell ring for old PCIe core revisions: read-modify-write of the mailbox
 * interrupt register, setting PCIE_INTB. `value` is unused on this path.
 *
 * NOTE(review): return type, local `w` declaration, braces and `return` statements
 * dropped in extraction; restored from context — verify against pristine dhd_pcie.c.
 */
static void
dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
{
	uint32 w;	/* NOTE(review): declaration dropped in extraction; inferred from usage */

	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	/* Set PCIE_INTB while preserving the other bits of the register */
	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
}
/*
 * Selects the doorbell-ring function for this PCIe core revision and caches the
 * doorbell register address/osh on the bus. Falls back to the slow
 * dhd_bus_ringbell() when the register address cannot be resolved.
 *
 * NOTE(review): the return-type line was dropped in extraction; `dhd_mb_ring_t`
 * (pointer to a ring function) is assumed from the returned symbols — confirm
 * against the project headers.
 */
dhd_mb_ring_t
dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
{
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		/* Old cores: mailbox interrupt register + read-modify-write ringer */
		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
			bus->pcie_mailbox_int);
		if (bus->pcie_mb_intr_addr) {
			bus->pcie_mb_intr_osh = si_osh(bus->sih);
			return dhd_bus_ringbell_oldpcie;
		}
	} else {
		/* Newer cores: direct doorbell-0 register + fast memory-mapped write */
		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
			dhd_bus_db0_addr_get(bus));
		if (bus->pcie_mb_intr_addr) {
			bus->pcie_mb_intr_osh = si_osh(bus->sih);
			return dhdpcie_bus_ringbell_fast;
		}
	}
	/* Address lookup failed: fall back to the si_corereg-based slow path */
	return dhd_bus_ringbell;
}
/*
 * Selects the doorbell-2 (IDMA/IFRM) ring function, caching the second doorbell
 * register address on success; falls back to the si_corereg-based slow path.
 *
 * NOTE(review): the return-type line was dropped in extraction; `dhd_mb_ring_2_t`
 * is assumed from the returned symbols — confirm against the project headers.
 */
dhd_mb_ring_2_t
dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
{
	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
		dhd_bus_db0_addr_2_get(bus));
	if (bus->pcie_mb_intr_2_addr) {
		bus->pcie_mb_intr_osh = si_osh(bus->sih);
		return dhdpcie_bus_ringbell_2_fast;
	}
	return dhd_bus_ringbell_2;
}
/*
 * Deferred procedure call: drains the mailbox/ring work signalled by the ISR and
 * re-enables the host interrupt when no more work is pending. Returns TRUE when
 * a reschedule of the DPC is wanted.
 *
 * NOTE(review): extraction dropped lines (return type, braces, the `!resched`
 * branch structure and `return` statements); restored from context — verify
 * against pristine dhd_pcie.c.
 */
bool
dhd_bus_dpc(struct dhd_bus *bus)
{
	bool resched = FALSE;	  /* Flag indicating resched wanted */
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	bus->dpc_entry_time = OSL_LOCALTIME_NS();

	DHD_GENERAL_LOCK(bus->dhd, flags);
	/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
	 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
	 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
	 * and if we return from here, then IOCTL response will never be handled
	 */
	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		bus->dpc_return_busdown_count++;
		return FALSE;	/* NOTE(review): return dropped in extraction — confirm value */
	}
#ifdef DHD_PCIE_RUNTIMEPM
	/* NOTE(review): the statement originally inside this ifdef (presumably a
	 * runtime-PM bookkeeping update) was dropped in extraction — restore from
	 * pristine source.
	 */
#endif /* DHD_PCIE_RUNTIMEPM */
	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
	if (!resched) {
		bus->dpc_intr_enable_count++;
		/* For Linux, Macos etc (otherthan NDIS) enable back the host interrupts
		 * which has been disabled in the dhdpcie_bus_isr()
		 */
		dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
		bus->dpc_exit_time = OSL_LOCALTIME_NS();
	} else {
		bus->resched_dpc_time = OSL_LOCALTIME_NS();
	}

	bus->dpc_sched = resched;

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return resched;
}
/*
 * Sends host->dongle mailbox data. On FW API rev >= 6 without legacy mailbox,
 * the data goes as an H2D control message; otherwise it is written to the shared
 * H2D_MB_DATA location (after waiting up to ~1 ms for any pending transaction to
 * be acked) followed by a device mailbox interrupt. Updates the D3/D0 inform
 * counters for the corresponding messages.
 *
 * NOTE(review): extraction dropped lines (return type, braces, loop counter
 * declaration, delay call, goto/return statements); restored from context —
 * verify against pristine dhd_pcie.c.
 */
int
dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
{
	uint32 cur_h2d_mb_data = 0;

	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return BCME_ERROR;	/* NOTE(review): return dropped in extraction — confirm */
	}

	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
		DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
			h2d_mb_data));
		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
		if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
			DHD_ERROR(("failure sending the H2D Mailbox message "
				"to firmware\n"));	/* NOTE(review): tail of string dropped — confirm */
			goto fail;
		}
		goto done;
	}

	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);

	if (cur_h2d_mb_data != 0) {
		uint32 i = 0;	/* NOTE(review): declaration dropped in extraction; inferred */
		DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
		/* Poll (bounded to ~100 iterations) for the dongle to ack the pending data */
		while ((i++ < 100) && cur_h2d_mb_data) {
			OSL_DELAY(10);	/* NOTE(review): delay line dropped in extraction — confirm */
			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
		}
		if (i >= 100) {
			DHD_ERROR(("%s : waited 1ms for the dngl "
				"to ack the previous mb transaction\n", __FUNCTION__));
			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
				__FUNCTION__, cur_h2d_mb_data));
		}
	}

	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
	dhd_bus_gen_devmb_intr(bus);

done:
	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
		bus->last_d3_inform_time = OSL_LOCALTIME_NS();
		bus->d3_inform_cnt++;
	}
	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
		bus->d0_inform_in_use_cnt++;
	}
	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
		bus->d0_inform_cnt++;
	}
	return BCME_OK;
fail:
	return BCME_ERROR;	/* NOTE(review): error-path return dropped in extraction — confirm */
}
/*
 * Handles the D3 ACK from the dongle: under the bus lock, disables dongle and
 * host interrupts, clears pending intstatus, and moves the bus into the
 * D3-ACK-received low-power state; then wakes the waiter in the suspend path.
 *
 * NOTE(review): return type and braces dropped in extraction; restored from context.
 */
static void
dhd_bus_handle_d3_ack(dhd_bus_t *bus)
{
	unsigned long flags_bus;
	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
	bus->suspend_intr_disable_count++;
	/* Disable dongle Interrupts Immediately after D3 */

	/* For Linux, Macos etc (otherthan NDIS) along with disabling
	 * dongle interrupt by clearing the IntMask, disable directly
	 * interrupt from the host side as well. Also clear the intstatus
	 * if it is set to avoid unnecessary intrrupts after D3 ACK.
	 */
	dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
	dhdpcie_bus_clear_intstatus(bus);
	dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */

	/* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
	bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
	/* Wake whoever is blocked in dhd_os_d3ack_wait() */
	bus->wait_for_d3_ack = 1;
	dhd_os_d3ack_wake(bus->dhd);
}
/*
 * Dispatches dongle->host mailbox data bits: FW halt (trap), deep-sleep
 * enter/exit handshake, host-sleep-exit ack, and D3 ACK. Brackets the work with
 * a PCIe power request when multiple backplane power domains are enabled.
 *
 * NOTE(review): extraction dropped lines (return type, braces, goto/else
 * structure); restored from context — verify against pristine dhd_pcie.c.
 */
static void
dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
{
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}

	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));

	if (d2h_mb_data & D2H_DEV_FWHALT) {
		DHD_ERROR(("FW trap has happened\n"));
		dhdpcie_checkdied(bus, NULL, 0);
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
		bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
		goto exit;	/* NOTE(review): early-exit dropped in extraction — confirm */
	}
	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
		bool ds_acked = FALSE;
		BCM_REFERENCE(ds_acked);
		/* A deep-sleep request arriving after D3-ACK is a protocol violation */
		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
			bus->dhd->busstate = DHD_BUS_DOWN;
			goto exit;	/* NOTE(review): exit path dropped in extraction — confirm */
		}
		/* what should we do */
		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
	}
	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
		/* what should we do */
		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
	}
	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
		/* what should we do */
		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
	}
	if (d2h_mb_data & D2H_DEV_D3_ACK) {
		/* what should we do */
		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
		if (!bus->wait_for_d3_ack) {
#if defined(DHD_HANG_SEND_UP_TEST)
			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
			} else {
				dhd_bus_handle_d3_ack(bus);
			}
#else /* DHD_HANG_SEND_UP_TEST */
			dhd_bus_handle_d3_ack(bus);
#endif /* DHD_HANG_SEND_UP_TEST */
		}
	}

exit:
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
}
/*
 * Legacy (pre-rev-6 API) mailbox handler: reads D2H_MB_DATA from the shared
 * area, validates it, acknowledges by writing zero back, then dispatches the
 * FW-halt / deep-sleep / D3-ACK bits inline.
 *
 * NOTE(review): extraction dropped lines (return type, braces, `zero`
 * declaration, goto targets); restored from context — verify against pristine
 * dhd_pcie.c.
 */
static void
dhdpcie_handle_mb_data(dhd_bus_t *bus)
{
	uint32 d2h_mb_data = 0;
	uint32 zero = 0;	/* NOTE(review): declaration dropped in extraction; inferred from usage */

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}

	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
			__FUNCTION__, d2h_mb_data));
		goto exit;	/* NOTE(review): exit path dropped in extraction — confirm */
	}

	/* Ack the dongle by clearing the shared mailbox word */
	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);

	DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
	if (d2h_mb_data & D2H_DEV_FWHALT) {
		DHD_ERROR(("FW trap has happened\n"));
		dhdpcie_checkdied(bus, NULL, 0);
		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
		goto exit;	/* NOTE(review): exit path dropped in extraction — confirm */
	}
	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
		/* what should we do */
		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
	}
	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
		/* what should we do */
		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
	}
	if (d2h_mb_data & D2H_DEV_D3_ACK) {
		/* what should we do */
		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
		if (!bus->wait_for_d3_ack) {
#if defined(DHD_HANG_SEND_UP_TEST)
			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
			} else {
				dhd_bus_handle_d3_ack(bus);
			}
#else /* DHD_HANG_SEND_UP_TEST */
			dhd_bus_handle_d3_ack(bus);
#endif /* DHD_HANG_SEND_UP_TEST */
		}
	}

exit:
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
}
/*
 * Rev-6+ mailbox handler: reads D2H_MB_DATA from the shared area, clears it,
 * and forwards the bits to dhd_bus_handle_mb_data(). Skipped when the link is down.
 *
 * NOTE(review): extraction dropped lines (return type, braces, `zero`
 * declaration, the empty-data guard); restored from context — verify against
 * pristine dhd_pcie.c.
 */
static void
dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
{
	uint32 d2h_mb_data = 0;
	uint32 zero = 0;	/* NOTE(review): declaration dropped in extraction; inferred from usage */

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return;
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}

	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
	if (!d2h_mb_data)
		goto exit;	/* NOTE(review): empty-data guard dropped in extraction — confirm */

	/* Ack the dongle by clearing the shared mailbox word */
	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);

	dhd_bus_handle_mb_data(bus, d2h_mb_data);

exit:
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
}
/*
 * Processes a mailbox interrupt. Old cores (2/4/6) only check the message-stream
 * bits; newer cores first dispatch function-0 mailbox data, then — unless the
 * bus is suspending/low-power — read the completion rings. Returns TRUE when
 * more ring work remains (DPC reschedule wanted).
 *
 * NOTE(review): extraction dropped lines (return type, braces, else structure,
 * goto targets, return statement); restored from context — verify against
 * pristine dhd_pcie.c.
 */
static bool
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
	bool resched = FALSE;

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
		(bus->sih->buscorerev == 4)) {
		/* Msg stream interrupt */
		if (intstatus & I_BIT1) {
			resched = dhdpci_bus_read_frames(bus);
		} else if (intstatus & I_BIT0) {
			/* do nothing for Now */
		}
	} else {
		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
			bus->api.handle_mb_data(bus);

		if ((bus->dhd->busstate == DHD_BUS_SUSPEND) || (bus->use_mailbox &&
			(bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE))) {
			DHD_ERROR(("%s: Bus is in power save state. "
				"Skip processing rest of ring buffers.\n", __FUNCTION__));
			goto exit;	/* NOTE(review): exit path dropped in extraction — confirm */
		}

		/* Validate intstatus only for INTX case */
		if ((bus->d2h_intr_method == PCIE_MSI) ||
			((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
				resched = dhdpci_bus_read_frames(bus);
				pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
				pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
			}
#else
			resched = dhdpci_bus_read_frames(bus);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
		}
	}

exit:
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
	return resched;
}
/*
 * Drains dongle->host work: checks for a FW trap first, then processes the
 * control ring, moves TX packets onto flow rings, and processes TX/RX/info
 * completion rings under per-unit perimeter locks. Returns TRUE when a bound
 * was hit and more completion work remains. Also confirms/declares PCIe
 * linkdown when a previous shared-memory read failed.
 *
 * NOTE(review): extraction dropped lines (return type, braces, `more`
 * declaration, return statements); restored from context — verify against
 * pristine dhd_pcie.c.
 */
static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
	bool more = FALSE;	/* NOTE(review): declaration dropped in extraction; inferred from usage */

	/* First check if there a FW trap */
	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
		return FALSE;	/* NOTE(review): return dropped in extraction — confirm */
	}

	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

	dhd_prot_process_ctrlbuf(bus->dhd);
	bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
	/* Unlock to give chance for resp to be handled */
	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

	/* Do not process rest of ring buf once bus enters low power state */
	if (!bus->use_mailbox && (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE)) {
		DHD_ERROR(("%s: Bus is in power save state. "
			"Skip processing rest of ring buffers.\n", __FUNCTION__));
		return FALSE;	/* NOTE(review): return dropped in extraction — confirm */
	}

	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
	/* update the flow ring cpls */
	dhd_update_txflowrings(bus->dhd);
	bus->last_process_flowring_time = OSL_LOCALTIME_NS();

	/* With heavy TX traffic, we could get a lot of TxStatus
	 * so bound the work per pass (dhd_txbound)
	 */
	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
	bus->last_process_txcpl_time = OSL_LOCALTIME_NS();

	/* With heavy RX traffic, this routine potentially could spend some time
	 * processing RX frames without RX bound
	 */
	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
	bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();

	/* Process info ring completion messages */
	more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
	bus->last_process_infocpl_time = OSL_LOCALTIME_NS();

#ifdef IDLE_TX_FLOW_MGMT
	if (bus->enable_idle_flowring_mgmt) {
		/* Look for idle flow rings */
		dhd_bus_check_idle_scan(bus);
	}
#endif /* IDLE_TX_FLOW_MGMT */

	/* don't talk to the dongle if fw is about to be reloaded */
	if (bus->dhd->hang_was_sent) {
		more = FALSE;	/* NOTE(review): statement dropped in extraction — confirm */
	}
	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

#ifdef SUPPORT_LINKDOWN_RECOVERY
	if (bus->read_shm_fail) {
		/* Read interrupt state once again to confirm linkdown */
		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
			bus->pcie_mailbox_int, 0, 0);
		if (intstatus != (uint32)-1) {
			/* Register still readable: SHM read failure was not a link loss */
			DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
#ifdef DHD_FW_COREDUMP
			if (bus->dhd->memdump_enabled) {
				DHD_OS_WAKE_LOCK(bus->dhd);
				bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
				dhd_bus_mem_dump(bus->dhd);
				DHD_OS_WAKE_UNLOCK(bus->dhd);
			}
#endif /* DHD_FW_COREDUMP */
		} else {
			DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
			bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
			bus->is_linkdown = 1;
		}

		/* Either way, report a hang so the upper layer can recover */
		dhd_prot_debug_info_print(bus->dhd);
		bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
		dhd_os_send_hang_message(bus->dhd);
		more = FALSE;	/* NOTE(review): statement dropped in extraction — confirm */
	}
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	return more;
}
/*
 * Sanity-checks dongle TCM: reads the pciedev_shared pointer from the last word
 * of dongle RAM, validates its range, reads the shared struct back, and compares
 * its console_addr against the cached copy. Returns TRUE when TCM looks valid.
 *
 * NOTE(review): extraction dropped lines (return type, local declarations for
 * addr/shaddr/rv, upper-bound clause of the range check, braces, returns);
 * restored from context — verify against pristine dhd_pcie.c.
 */
static bool
dhdpcie_tcm_valid(dhd_bus_t *bus)
{
	uint32 addr = 0;	/* NOTE(review): declarations dropped in extraction; inferred */
	uint32 shaddr = 0;
	int rv;
	pciedev_shared_t sh;

	shaddr = bus->dongle_ram_base + bus->ramsize - 4;

	/* Read last word in memory to determine address of pciedev_shared structure */
	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));

	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
		(addr > shaddr)) {	/* NOTE(review): upper-bound clause dropped in extraction — confirm */
		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
			__FUNCTION__, addr));
		return FALSE;
	}

	/* Read hndrte_shared structure */
	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
		sizeof(pciedev_shared_t))) < 0) {
		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
		return FALSE;
	}

	/* Compare any field in pciedev_shared_t */
	if (sh.console_addr != bus->pcie_sh->console_addr) {
		DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
		return FALSE;
	}

	return TRUE;
}
/*
 * Formats the FW/DHD bus API revision pair into the global bus_api_revision
 * string (reported alongside the driver version).
 *
 * NOTE(review): return type and braces dropped in extraction; restored from context.
 */
static void
dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
{
	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
		firmware_api_version, host_api_version);
}
/*
 * Returns TRUE when the dongle firmware's shared-structure API revision is one
 * the host supports: explicitly versions 5/6/7, or any other revision not newer
 * than the host's.
 *
 * NOTE(review): extraction dropped lines (return type, braces, the TRUE
 * assignments and default label of the switch); restored from context —
 * verify against pristine dhd_pcie.c.
 */
static bool
dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
{
	bool retcode = FALSE;

	DHD_INFO(("firmware api revision %d, host api revision %d\n",
		firmware_api_version, host_api_version));

	switch (firmware_api_version) {
	case PCIE_SHARED_VERSION_7:
	case PCIE_SHARED_VERSION_6:
	case PCIE_SHARED_VERSION_5:
		retcode = TRUE;	/* NOTE(review): assignment dropped in extraction — confirm */
		break;
	default:
		if (firmware_api_version <= host_api_version)
			retcode = TRUE;	/* NOTE(review): assignment dropped in extraction — confirm */
	}
	return retcode;
}
7675 dhdpcie_readshared(dhd_bus_t
*bus
)
7678 int rv
, dma_indx_wr_buf
, dma_indx_rd_buf
;
7680 pciedev_shared_t
*sh
= bus
->pcie_sh
;
7682 bool idma_en
= FALSE
;
7684 if (MULTIBP_ENAB(bus
->sih
)) {
7685 dhd_bus_pcie_pwr_req(bus
);
7688 shaddr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
7689 /* start a timer for 5 seconds */
7690 dhd_timeout_start(&tmo
, MAX_READ_TIMEOUT
);
7692 while (((addr
== 0) || (addr
== bus
->nvram_csm
)) && !dhd_timeout_expired(&tmo
)) {
7693 /* Read last word in memory to determine address of pciedev_shared structure */
7694 addr
= LTOH32(dhdpcie_bus_rtcm32(bus
, shaddr
));
7697 if (addr
== (uint32
)-1) {
7698 DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__
));
7699 #ifdef SUPPORT_LINKDOWN_RECOVERY
7700 #ifdef CONFIG_ARCH_MSM
7701 bus
->no_cfg_restore
= 1;
7702 #endif /* CONFIG_ARCH_MSM */
7703 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7704 bus
->is_linkdown
= 1;
7708 if ((addr
== 0) || (addr
== bus
->nvram_csm
) || (addr
< bus
->dongle_ram_base
) ||
7710 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
7711 __FUNCTION__
, addr
));
7712 DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo
.elapsed
));
7713 #ifdef DEBUG_DNGL_INIT_FAIL
7714 #ifdef CUSTOMER_HW4_DEBUG
7715 bus
->dhd
->memdump_enabled
= DUMP_MEMFILE_BUGON
;
7716 #endif /* CUSTOMER_HW4_DEBUG */
7717 bus
->dhd
->memdump_type
= DUMP_TYPE_DONGLE_INIT_FAILURE
;
7718 dhdpcie_mem_dump(bus
);
7719 #endif /* DEBUG_DNGL_INIT_FAIL */
7722 bus
->shared_addr
= (ulong
)addr
;
7723 DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
7724 "before dongle is ready\n", addr
, tmo
.elapsed
));
7727 /* Read hndrte_shared structure */
7728 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)sh
,
7729 sizeof(pciedev_shared_t
))) < 0) {
7730 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv
));
7735 sh
->flags
= ltoh32(sh
->flags
);
7736 sh
->trap_addr
= ltoh32(sh
->trap_addr
);
7737 sh
->assert_exp_addr
= ltoh32(sh
->assert_exp_addr
);
7738 sh
->assert_file_addr
= ltoh32(sh
->assert_file_addr
);
7739 sh
->assert_line
= ltoh32(sh
->assert_line
);
7740 sh
->console_addr
= ltoh32(sh
->console_addr
);
7741 sh
->msgtrace_addr
= ltoh32(sh
->msgtrace_addr
);
7742 sh
->dma_rxoffset
= ltoh32(sh
->dma_rxoffset
);
7743 sh
->rings_info_ptr
= ltoh32(sh
->rings_info_ptr
);
7744 sh
->flags2
= ltoh32(sh
->flags2
);
7746 /* load bus console address */
7747 bus
->console_addr
= sh
->console_addr
;
7749 /* Read the dma rx offset */
7750 bus
->dma_rxoffset
= bus
->pcie_sh
->dma_rxoffset
;
7751 dhd_prot_rx_dataoffset(bus
->dhd
, bus
->dma_rxoffset
);
7753 DHD_INFO(("DMA RX offset from shared Area %d\n", bus
->dma_rxoffset
));
7755 bus
->api
.fw_rev
= sh
->flags
& PCIE_SHARED_VERSION_MASK
;
7756 if (!(dhdpcie_check_firmware_compatible(bus
->api
.fw_rev
, PCIE_SHARED_VERSION
)))
7758 DHD_ERROR(("%s: pcie_shared version %d in dhd "
7759 "is older than pciedev_shared version %d in dongle\n",
7760 __FUNCTION__
, PCIE_SHARED_VERSION
,
7764 dhdpcie_update_bus_api_revisions(bus
->api
.fw_rev
, PCIE_SHARED_VERSION
);
7766 bus
->rw_index_sz
= (sh
->flags
& PCIE_SHARED_2BYTE_INDICES
) ?
7767 sizeof(uint16
) : sizeof(uint32
);
7768 DHD_INFO(("%s: Dongle advertizes %d size indices\n",
7769 __FUNCTION__
, bus
->rw_index_sz
));
7771 #ifdef IDLE_TX_FLOW_MGMT
7772 if (sh
->flags
& PCIE_SHARED_IDLE_FLOW_RING
) {
7773 DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
7775 bus
->enable_idle_flowring_mgmt
= TRUE
;
7777 #endif /* IDLE_TX_FLOW_MGMT */
7779 if (IDMA_CAPABLE(bus
)) {
7780 if (bus
->sih
->buscorerev
== 23) {
7787 bus
->dhd
->idma_enable
= (sh
->flags
& PCIE_SHARED_IDMA
) ? TRUE
: FALSE
;
7788 bus
->dhd
->ifrm_enable
= (sh
->flags
& PCIE_SHARED_IFRM
) ? TRUE
: FALSE
;
7791 bus
->dhd
->d2h_sync_mode
= sh
->flags
& PCIE_SHARED_D2H_SYNC_MODE_MASK
;
7793 bus
->dhd
->dar_enable
= (sh
->flags
& PCIE_SHARED_DAR
) ? TRUE
: FALSE
;
7795 /* Does the FW support DMA'ing r/w indices */
7796 if (sh
->flags
& PCIE_SHARED_DMA_INDEX
) {
7797 if (!bus
->dhd
->dma_ring_upd_overwrite
) {
7799 if (!IFRM_ENAB(bus
->dhd
)) {
7800 bus
->dhd
->dma_h2d_ring_upd_support
= TRUE
;
7802 bus
->dhd
->dma_d2h_ring_upd_support
= TRUE
;
7806 if (bus
->dhd
->dma_d2h_ring_upd_support
)
7807 bus
->dhd
->d2h_sync_mode
= 0;
7809 DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
7811 (bus
->dhd
->dma_h2d_ring_upd_support
? 1 : 0),
7812 (bus
->dhd
->dma_d2h_ring_upd_support
? 1 : 0)));
7813 } else if (!(sh
->flags
& PCIE_SHARED_D2H_SYNC_MODE_MASK
)) {
7814 DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
7816 return BCME_UNSUPPORTED
;
7818 bus
->dhd
->dma_h2d_ring_upd_support
= FALSE
;
7819 bus
->dhd
->dma_d2h_ring_upd_support
= FALSE
;
7822 /* Does the firmware support fast delete ring? */
7823 if (sh
->flags2
& PCIE_SHARED2_FAST_DELETE_RING
) {
7824 DHD_INFO(("%s: Firmware supports fast delete ring\n",
7826 bus
->dhd
->fast_delete_ring_support
= TRUE
;
7828 DHD_INFO(("%s: Firmware does not support fast delete ring\n",
7830 bus
->dhd
->fast_delete_ring_support
= FALSE
;
7833 /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
7835 ring_info_t ring_info
;
7837 /* boundary check */
7838 if ((sh
->rings_info_ptr
< bus
->dongle_ram_base
) || (sh
->rings_info_ptr
> shaddr
)) {
7839 DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
7840 __FUNCTION__
, sh
->rings_info_ptr
));
7844 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, sh
->rings_info_ptr
,
7845 (uint8
*)&ring_info
, sizeof(ring_info_t
))) < 0)
7848 bus
->h2d_mb_data_ptr_addr
= ltoh32(sh
->h2d_mb_data_ptr
);
7849 bus
->d2h_mb_data_ptr_addr
= ltoh32(sh
->d2h_mb_data_ptr
);
7851 if (bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
) {
7852 bus
->max_tx_flowrings
= ltoh16(ring_info
.max_tx_flowrings
);
7853 bus
->max_submission_rings
= ltoh16(ring_info
.max_submission_queues
);
7854 bus
->max_completion_rings
= ltoh16(ring_info
.max_completion_rings
);
7855 bus
->max_cmn_rings
= bus
->max_submission_rings
- bus
->max_tx_flowrings
;
7856 bus
->api
.handle_mb_data
= dhdpcie_read_handle_mb_data
;
7857 bus
->use_mailbox
= sh
->flags
& PCIE_SHARED_USE_MAILBOX
;
7860 bus
->max_tx_flowrings
= ltoh16(ring_info
.max_tx_flowrings
);
7861 bus
->max_submission_rings
= bus
->max_tx_flowrings
;
7862 bus
->max_completion_rings
= BCMPCIE_D2H_COMMON_MSGRINGS
;
7863 bus
->max_cmn_rings
= BCMPCIE_H2D_COMMON_MSGRINGS
;
7864 bus
->api
.handle_mb_data
= dhdpcie_handle_mb_data
;
7865 bus
->use_mailbox
= TRUE
;
7867 if (bus
->max_completion_rings
== 0) {
7868 DHD_ERROR(("dongle completion rings are invalid %d\n",
7869 bus
->max_completion_rings
));
7872 if (bus
->max_submission_rings
== 0) {
7873 DHD_ERROR(("dongle submission rings are invalid %d\n",
7874 bus
->max_submission_rings
));
7877 if (bus
->max_tx_flowrings
== 0) {
7878 DHD_ERROR(("dongle txflow rings are invalid %d\n", bus
->max_tx_flowrings
));
7882 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
7883 * The max_sub_queues is read from FW initialized ring_info
7885 if (bus
->dhd
->dma_h2d_ring_upd_support
|| IDMA_ENAB(bus
->dhd
)) {
7886 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7887 H2D_DMA_INDX_WR_BUF
, bus
->max_submission_rings
);
7888 dma_indx_rd_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7889 D2H_DMA_INDX_RD_BUF
, bus
->max_completion_rings
);
7891 if ((dma_indx_wr_buf
!= BCME_OK
) || (dma_indx_rd_buf
!= BCME_OK
)) {
7892 DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
7893 "Host will use w/r indices in TCM\n",
7895 bus
->dhd
->dma_h2d_ring_upd_support
= FALSE
;
7896 bus
->dhd
->idma_enable
= FALSE
;
7900 if (bus
->dhd
->dma_d2h_ring_upd_support
) {
7901 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7902 D2H_DMA_INDX_WR_BUF
, bus
->max_completion_rings
);
7903 dma_indx_rd_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7904 H2D_DMA_INDX_RD_BUF
, bus
->max_submission_rings
);
7906 if ((dma_indx_wr_buf
!= BCME_OK
) || (dma_indx_rd_buf
!= BCME_OK
)) {
7907 DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
7908 "Host will use w/r indices in TCM\n",
7910 bus
->dhd
->dma_d2h_ring_upd_support
= FALSE
;
7914 if (IFRM_ENAB(bus
->dhd
)) {
7915 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
7916 H2D_IFRM_INDX_WR_BUF
, bus
->max_tx_flowrings
);
7918 if (dma_indx_wr_buf
!= BCME_OK
) {
7919 DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
7921 bus
->dhd
->ifrm_enable
= FALSE
;
7925 /* read ringmem and ringstate ptrs from shared area and store in host variables */
7926 dhd_fillup_ring_sharedptr_info(bus
, &ring_info
);
7927 if (dhd_msg_level
& DHD_INFO_VAL
) {
7928 bcm_print_bytes("ring_info_raw", (uchar
*)&ring_info
, sizeof(ring_info_t
));
7930 DHD_INFO(("ring_info\n"));
7932 DHD_ERROR(("%s: max H2D queues %d\n",
7933 __FUNCTION__
, ltoh16(ring_info
.max_tx_flowrings
)));
7935 DHD_INFO(("mail box address\n"));
7936 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
7937 __FUNCTION__
, bus
->h2d_mb_data_ptr_addr
));
7938 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
7939 __FUNCTION__
, bus
->d2h_mb_data_ptr_addr
));
7942 DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
7943 __FUNCTION__
, bus
->dhd
->d2h_sync_mode
));
7945 bus
->dhd
->d2h_hostrdy_supported
=
7946 ((sh
->flags
& PCIE_SHARED_HOSTRDY_SUPPORT
) == PCIE_SHARED_HOSTRDY_SUPPORT
);
7948 bus
->dhd
->ext_trap_data_supported
=
7949 ((sh
->flags2
& PCIE_SHARED2_EXTENDED_TRAP_DATA
) == PCIE_SHARED2_EXTENDED_TRAP_DATA
);
7951 if ((sh
->flags2
& PCIE_SHARED2_TXSTATUS_METADATA
) == 0)
7952 bus
->dhd
->pcie_txs_metadata_enable
= 0;
7955 bus
->d2h_minidump
= (sh
->flags2
& PCIE_SHARED2_FW_SMALL_MEMDUMP
) ? TRUE
: FALSE
;
7956 DHD_ERROR(("FW supports minidump ? %s \n", bus
->d2h_minidump
? "Y" : "N"));
7957 if (bus
->d2h_minidump_override
) {
7958 bus
->d2h_minidump
= FALSE
;
7960 DHD_ERROR(("d2h_minidump: %d d2h_minidump_override: %d\n",
7961 bus
->d2h_minidump
, bus
->d2h_minidump_override
));
7962 #endif /* D2H_MINIDUMP */
7964 if (MULTIBP_ENAB(bus
->sih
)) {
7965 dhd_bus_pcie_pwr_req_clear(bus
);
7968 } /* dhdpcie_readshared */
/** Read ring mem and ring state ptr info from shared memory area in device memory */
static void
dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
{
	uint16 i = 0;
	uint16 j = 0;
	uint32 tcm_memloc;
	uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint16 max_tx_flowrings = bus->max_tx_flowrings;

	/* Ring mem ptr info */
	/* Alloated in the order
		H2D_MSGRING_CONTROL_SUBMIT              0
		H2D_MSGRING_RXPOST_SUBMIT               1
		D2H_MSGRING_CONTROL_COMPLETE            2
		D2H_MSGRING_TX_COMPLETE                 3
		D2H_MSGRING_RX_COMPLETE                 4
	*/

	/* ringmemptr holds start of the mem block address space */
	tcm_memloc = ltoh32(ring_info->ringmem_ptr);

	/* Find out ringmem ptr for each ring common ring; each ring's descriptor
	 * occupies sizeof(ring_mem_t) bytes in TCM, laid out back-to-back.
	 */
	for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
		bus->ring_sh[i].ring_mem_addr = tcm_memloc;
		/* Update mem block */
		tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
		DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
			i, bus->ring_sh[i].ring_mem_addr));
	}

	/* Ring state mem ptr info */
	d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
	d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
	h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
	h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);

	/* Store h2d common ring write/read pointers; each index slot is
	 * bus->rw_index_sz bytes wide in the dongle index array.
	 */
	for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
		bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
		bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

		/* update mem block */
		h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
		h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;

		DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
	}

	/* Store d2h common ring write/read pointers; note 'i' keeps running so
	 * d2h entries follow the h2d entries in bus->ring_sh[].
	 */
	for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;

		/* update mem block */
		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;

		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
	}

	/* Store txflow ring write/read pointers */
	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
		/* Pre-v6 firmware counts the common rings inside max_tx_flowrings */
		max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
	} else {
		/* Account for Debug info h2d ring located after the last tx flow ring */
		max_tx_flowrings = max_tx_flowrings + 1;
	}
	for (j = 0; j < max_tx_flowrings; i++, j++)
	{
		bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
		bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

		/* update mem block */
		h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
		h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;

		DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
			bus->ring_sh[i].ring_state_w,
			bus->ring_sh[i].ring_state_r));
	}
	/* store wr/rd pointers for debug info completion ring */
	bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
	bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
	d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
	d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
	DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
		bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
} /* dhd_fillup_ring_sharedptr_info */
/**
 * Initialize bus module: prepare for communication with the dongle. Called after downloading
 * firmware into the dongle.
 */
int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
{
	dhd_bus_t *bus = dhdp->bus;
	int ret = 0;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	ASSERT(bus->dhd);
	if (!bus->dhd)
		return 0;

	if (bus->sih->buscorerev == 66) {
		/* WAR for corerev 66: clear power request before reload */
		dhd_bus_pcie_pwr_req_clear_reload_war(bus);
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}

	/* Configure AER registers to log the TLP header */
	dhd_bus_aer_config(bus);

	/* Make sure we're talking to the core. */
	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	ASSERT(bus->reg != NULL);

	/* before opening up bus for data transfer, check if shared are is intact */
	ret = dhdpcie_readshared(bus);
	if (ret < 0) {
		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
		goto exit;
	}

	/* Make sure we're talking to the core. */
	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
	ASSERT(bus->reg != NULL);

	dhd_init_bus_lock(bus);

	/* Set bus state according to enable result */
	dhdp->busstate = DHD_BUS_DATA;
	bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
	dhdp->dhd_bus_busy_state = 0;

	/* D11 status via PCIe completion header */
	if ((ret = dhdpcie_init_d11status(bus)) < 0) {
		goto exit;
	}

	if (!dhd_download_fw_on_driverload)
		dhd_dpc_enable(bus->dhd);
	/* Enable the interrupt after device is up */
	dhdpcie_bus_intr_enable(bus);

	bus->intr_enabled = TRUE;

	/* bcmsdh_intr_unmask(bus->sdh); */
#ifdef DHD_PCIE_RUNTIMEPM
	bus->idlecount = 0;
	bus->idletime = (int32)MAX_IDLE_COUNT;
	init_waitqueue_head(&bus->rpm_queue);
	mutex_init(&bus->pm_lock);
#else
	bus->idletime = 0;
#endif /* DHD_PCIE_RUNTIMEPM */

	/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
		bus->use_d0_inform = TRUE;
	} else {
		bus->use_d0_inform = FALSE;
	}

exit:
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
	return ret;
}
8151 dhdpcie_init_shared_addr(dhd_bus_t
*bus
)
8155 addr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
8156 #ifdef DHD_PCIE_RUNTIMEPM
8157 dhdpcie_runtime_bus_wake(bus
->dhd
, TRUE
, __builtin_return_address(0));
8158 #endif /* DHD_PCIE_RUNTIMEPM */
8159 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
));
/**
 * Check whether a PCI (vendor, device) pair is a dongle supported by this driver.
 *
 * @param vendor  PCI vendor ID; only PCI_VENDOR_ID_BROADCOM is accepted.
 * @param device  PCI device ID (D11 core IDs or chip IDs).
 * @return 0 when supported, -ENODEV otherwise.
 */
int
dhdpcie_chipmatch(uint16 vendor, uint16 device)
{
	if (vendor != PCI_VENDOR_ID_BROADCOM) {
		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
			vendor, device));
		return (-ENODEV);
	}

	/* Each family is matched on its D11 core IDs (dual-band/2G/5G) and,
	 * where applicable, the bare chip ID.
	 */
	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
		(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
		(device == BCM43569_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
		(device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
		return 0;
	}

	/* BCM4345_CHIP() is a macro matching the whole 4345 chip family */
	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
		(device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
		return 0;
	}

	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
		(device == BCM43452_D11AC5G_ID)) {
		return 0;
	}

	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
		(device == BCM4358_D11AC5G_ID)) {
		return 0;
	}

	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
		(device == BCM4359_D11AC5G_ID)) {
		return 0;
	}

	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
		(device == BCM43596_D11AC5G_ID)) {
		return 0;
	}

	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
		(device == BCM43597_D11AC5G_ID)) {
		return 0;
	}

	if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
		(device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
		(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
		(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
		(device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
		(device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
		(device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
		return 0;
	}

	/* 802.11ax (D11AX) families */
	if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
		(device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
		return 0;
	}

	if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
		(device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
		return 0;
	}

	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
	return (-ENODEV);
} /* dhdpcie_chipmatch */
/**
 * Name: dhdpcie_cc_nvmshadow
 *
 * Description:
 * A shadow of OTP/SPROM exists in ChipCommon Region
 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
 * can also be read from ChipCommon Registers.
 *
 * Dumps the strapped NVM (OTP or SPROM) shadow into @b as 16-bit words.
 * Switches the backplane window to ChipCommon (or GCI) and restores the
 * caller's core on exit.
 *
 * @return BCME_OK on success; BCME_UNSUPPORTED / BCME_NOTFOUND / BCME_NOTREADY
 *         on the various precondition failures.
 */
static int
dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
{
	uint16 dump_offset = 0;
	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;

	/* Table for 65nm OTP Size (in bits) */
	int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};

	volatile uint16 *nvm_shadow;

	uint cur_coreid;
	uint chipc_corerev;
	chipcregs_t *chipcregs;

	/* Save the current core */
	cur_coreid = si_coreid(bus->sih);
	/* Switch to ChipC */
	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
	ASSERT(chipcregs != NULL);

	chipc_corerev = si_corerev(bus->sih);

	/* Check ChipcommonCore Rev */
	if (chipc_corerev < 44) {
		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
		return BCME_UNSUPPORTED;
	}

	/* Only the Olympic chip families carry the shadow this code expects */
	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
		((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
		((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
		DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
			"4350/4345/4355/4364 only\n", __FUNCTION__));
		return BCME_UNSUPPORTED;
	}

	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
	if (chipcregs->sromcontrol & SRC_PRESENT) {
		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
					>> SRC_SIZE_SHIFT))) * 1024;
		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
	}

	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
		bcm_bprintf(b, "\nOTP Present");

		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
			== OTPL_WRAP_TYPE_40NM) {
			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
			/* Chipcommon rev51 is a variation on rev45 and does not support
			 * the latest OTP configuration.
			 */
			if (chipc_corerev != 51 && chipc_corerev >= 49) {
				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
			} else {
				otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
					>> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
			}
		} else {
			/* This part is untested since newer chips have 40nm OTP */
			/* Chipcommon rev51 is a variation on rev45 and does not support
			 * the latest OTP configuration.
			 */
			if (chipc_corerev != 51 && chipc_corerev >= 49) {
				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
						>> OTPL_ROW_SIZE_SHIFT];
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
			} else {
				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
						>> CC_CAP_OTPSIZE_SHIFT];
				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
					__FUNCTION__));
			}
		}
	}

	/* Chipcommon rev51 is a variation on rev45 and does not support
	 * the latest OTP configuration.
	 */
	if (chipc_corerev != 51 && chipc_corerev >= 49) {
		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
			DHD_ERROR(("%s: SPROM and OTP could not be found "
				"sromcontrol = %x, otplayout = %x \n",
				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
			return BCME_NOTFOUND;
		}
	} else {
		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
			DHD_ERROR(("%s: SPROM and OTP could not be found "
				"sromcontrol = %x, capablities = %x \n",
				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
			return BCME_NOTFOUND;
		}
	}

	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {

		bcm_bprintf(b, "OTP Strap selected.\n"
			"\nOTP Shadow in ChipCommon:\n");

		dump_size = otp_size / 16 ; /* 16bit words */

	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
		(chipcregs->sromcontrol & SRC_PRESENT)) {

		bcm_bprintf(b, "SPROM Strap selected\n"
			"\nSPROM Shadow in ChipCommon:\n");

		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
		/* dump_size in 16bit words */
		/* NOTE(review): sprom_size is in bits here, so 'sprom_size > 8' is
		 * true for any present SPROM and the 8Kbit cap always applies —
		 * the comparison looks like it was meant to be against 8 * 1024.
		 * Left as-is; confirm intent before changing dump behavior.
		 */
		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
	} else {
		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
			__FUNCTION__));
		return BCME_NOTFOUND;
	}

	if (bus->regs == NULL) {
		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
		return BCME_NOTREADY;
	}

	bcm_bprintf(b, "\n OffSet:");

	/* Chipcommon rev51 is a variation on rev45 and does not support
	 * the latest OTP configuration.
	 */
	if (chipc_corerev != 51 && chipc_corerev >= 49) {
		/* Chip common can read only 8kbits,
		 * for ccrev >= 49 otp size is around 12 kbits so use GCI core
		 */
		nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
	} else {
		/* Point to the SPROM/OTP shadow in ChipCommon */
		nvm_shadow = chipcregs->sromotp;
	}

	if (nvm_shadow == NULL) {
		DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
		return BCME_NOTFOUND;
	}

	/*
	 * Read 16 bits / iteration.
	 * dump_size & dump_offset in 16-bit words
	 */
	while (dump_offset < dump_size) {
		if (dump_offset % 2 == 0)
			/* Print the offset in the shadow space in Bytes */
			bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);

		bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
		dump_offset += 0x1;
	}

	/* Switch back to the original core */
	si_setcore(bus->sih, cur_coreid, 0);

	return BCME_OK;
} /* dhdpcie_cc_nvmshadow */
/** Flow rings are dynamically created and destroyed */
void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
{
	void *pkt;
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
	unsigned long flags;

	queue = &flow_ring_node->queue;

#ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
	 * when there is a newly coming packet from network stack.
	 */
	dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */

	/* clean up BUS level info */
	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

	/* Flush all pending packets in the queue, if any */
	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
		PKTFREE(bus->dhd->osh, pkt, TRUE);
	}
	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));

	/* Reinitialise flowring's queue */
	dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
	flow_ring_node->active = FALSE;

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	dll_delete(&flow_ring_node->list);
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	/* Release the flowring object back into the pool */
	dhd_prot_flowrings_pool_release(bus->dhd,
		flow_ring_node->flowid, flow_ring_node->prot_info);

	/* Free the flowid back to the flowid allocator */
	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
		flow_ring_node->flowid);
}
8515 * Allocate a Flow ring buffer,
8516 * Init Ring buffer, send Msg to device about flow ring creation
8519 dhd_bus_flow_ring_create_request(dhd_bus_t
*bus
, void *arg
)
8521 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)arg
;
8523 DHD_INFO(("%s :Flow create\n", __FUNCTION__
));
8525 /* Send Msg to device about flow ring creation */
8526 if (dhd_prot_flow_ring_create(bus
->dhd
, flow_ring_node
) != BCME_OK
)
/** Handle response from dongle on a 'flow ring create' request */
void
dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
{
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;

	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));

	/* Boundary check of the flowid */
	if (flowid >= bus->dhd->num_flow_rings) {
		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
			flowid, bus->dhd->num_flow_rings));
		return;
	}

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
	if (!flow_ring_node) {
		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
		return;
	}

	ASSERT(flow_ring_node->flowid == flowid);
	if (flow_ring_node->flowid != flowid) {
		DHD_ERROR(("%s: flowid %d is different from the flowid "
			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
			flow_ring_node->flowid));
		return;
	}

	if (status != BCME_OK) {
		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
			__FUNCTION__, status));
		/* Call Flow clean up */
		dhd_bus_clean_flow_ring(bus, flow_ring_node);
		return;
	}

	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Now add the Flow ring node into the active list
	 * Note that this code to add the newly created node to the active
	 * list was living in dhd_flowid_lookup. But note that after
	 * adding the node to the active list the contents of node is being
	 * filled in dhd_prot_flow_ring_create.
	 * If there is a D2H interrupt after the node gets added to the
	 * active list and before the node gets populated with values
	 * from the Bottom half dhd_update_txflowrings would be called.
	 * which will then try to walk through the active flow ring list,
	 * pickup the nodes and operate on them. Now note that since
	 * the function dhd_prot_flow_ring_create is not finished yet
	 * the contents of flow_ring_node can still be NULL leading to
	 * crashes. Hence the flow_ring_node should be added to the
	 * active list only after its truely created, which is after
	 * receiving the create response message from the Host.
	 */
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */

	return;
}
/**
 * Flush the flow ring's pending packets and ask the dongle to delete the ring.
 * The ring is actually torn down when the delete response arrives.
 *
 * @param arg  flow_ring_node_t to delete.
 * @return BCME_ERROR when a delete is already pending, else BCME_OK.
 */
int
dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
{
	void *pkt;
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;

	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));

	flow_ring_node = (flow_ring_node_t *)arg;

#ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
	 * when there is a newly coming packet from network stack.
	 */
	dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
		/* Another delete is already in flight for this ring */
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
		return BCME_ERROR;
	}
	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;

	queue = &flow_ring_node->queue; /* queue associated with flow ring */

	/* Flush all pending packets in the queue, if any */
	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
		PKTFREE(bus->dhd->osh, pkt, TRUE);
	}
	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Send Msg to device about flow ring deletion */
	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);

	return BCME_OK;
}
/**
 * Handle the dongle's response to a 'flow ring delete' request; on success the
 * ring's resources are cleaned up and the flowid is freed.
 */
void
dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
{
	flow_ring_node_t *flow_ring_node;

	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));

	/* Boundary check of the flowid */
	if (flowid >= bus->dhd->num_flow_rings) {
		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
			flowid, bus->dhd->num_flow_rings));
		return;
	}

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
	if (!flow_ring_node) {
		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
		return;
	}

	ASSERT(flow_ring_node->flowid == flowid);
	if (flow_ring_node->flowid != flowid) {
		DHD_ERROR(("%s: flowid %d is different from the flowid "
			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
			flow_ring_node->flowid));
		return;
	}

	if (status != BCME_OK) {
		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
			__FUNCTION__, status));
		return;
	}

	/* Only a ring we actually asked to delete may be cleaned up here */
	if (flow_ring_node->status != FLOW_RING_STATUS_DELETE_PENDING) {
		DHD_ERROR(("%s: invalid state flowid = %d, status = %d\n",
			__FUNCTION__, flowid, flow_ring_node->status));
		return;
	}

	/* Call Flow clean up */
	dhd_bus_clean_flow_ring(bus, flow_ring_node);

	return;
}
/**
 * Drop every packet queued on the flow ring and ask the dongle to flush it.
 * The ring returns to FLOW_RING_STATUS_OPEN when the flush response arrives.
 *
 * @param arg  flow_ring_node_t to flush.
 * @return BCME_OK (the request itself cannot fail here).
 */
int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
{
	void *pkt;
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;

	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));

	flow_ring_node = (flow_ring_node_t *)arg;

	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	queue = &flow_ring_node->queue; /* queue associated with flow ring */
	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
	 * once flow ring flush response is received for this flowring node.
	 */
	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;

#ifdef DHDTCPACK_SUPPRESS
	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
	 * when there is a newly coming packet from network stack.
	 */
	dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */

	/* Flush all pending packets in the queue, if any */
	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
		PKTFREE(bus->dhd->osh, pkt, TRUE);
	}
	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	/* Send Msg to device about flow ring flush */
	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);

	return BCME_OK;
}
/**
 * Handle the dongle's response to a 'flow ring flush' request: on success the
 * ring is returned to FLOW_RING_STATUS_OPEN.
 */
void
dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
{
	flow_ring_node_t *flow_ring_node;

	if (status != BCME_OK) {
		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
			__FUNCTION__, status));
		return;
	}

	/* Boundary check of the flowid */
	if (flowid >= bus->dhd->num_flow_rings) {
		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
			flowid, bus->dhd->num_flow_rings));
		return;
	}

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
	if (!flow_ring_node) {
		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
		return;
	}

	ASSERT(flow_ring_node->flowid == flowid);
	if (flow_ring_node->flowid != flowid) {
		DHD_ERROR(("%s: flowid %d is different from the flowid "
			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
			flow_ring_node->flowid));
		return;
	}

	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
	return;
}
8764 dhd_bus_max_h2d_queues(struct dhd_bus
*bus
)
8766 return bus
->max_submission_rings
;
8769 /* To be symmetric with SDIO */
8771 dhd_bus_pktq_flush(dhd_pub_t
*dhdp
)
8777 dhd_bus_set_linkdown(dhd_pub_t
*dhdp
, bool val
)
8779 dhdp
->bus
->is_linkdown
= val
;
8783 dhd_bus_get_linkdown(dhd_pub_t
*dhdp
)
8785 return dhdp
->bus
->is_linkdown
;
8788 #ifdef IDLE_TX_FLOW_MGMT
8789 /* resume request */
8791 dhd_bus_flow_ring_resume_request(dhd_bus_t
*bus
, void *arg
)
8793 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)arg
;
8795 DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__
, flow_ring_node
->flowid
));
8797 flow_ring_node
->status
= FLOW_RING_STATUS_RESUME_PENDING
;
8799 /* Send Msg to device about flow ring resume */
8800 dhd_prot_flow_ring_resume(bus
->dhd
, flow_ring_node
);
8805 /* add the node back to active flowring */
8807 dhd_bus_flow_ring_resume_response(dhd_bus_t
*bus
, uint16 flowid
, int32 status
)
8810 flow_ring_node_t
*flow_ring_node
;
8812 DHD_TRACE(("%s :flowid %d \n", __FUNCTION__
, flowid
));
8814 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
8815 ASSERT(flow_ring_node
->flowid
== flowid
);
8817 if (status
!= BCME_OK
) {
8818 DHD_ERROR(("%s Error Status = %d \n",
8819 __FUNCTION__
, status
));
8823 DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
8824 __FUNCTION__
, flow_ring_node
->flowid
, flow_ring_node
->queue
.len
));
8826 flow_ring_node
->status
= FLOW_RING_STATUS_OPEN
;
8828 dhd_bus_schedule_queue(bus
, flowid
, FALSE
);
8832 /* scan the flow rings in active list for idle time out */
8834 dhd_bus_check_idle_scan(dhd_bus_t
*bus
)
8836 uint64 time_stamp
; /* in millisec */
8839 time_stamp
= OSL_SYSUPTIME();
8840 diff
= time_stamp
- bus
->active_list_last_process_ts
;
8842 if (diff
> IDLE_FLOW_LIST_TIMEOUT
) {
8843 dhd_bus_idle_scan(bus
);
8844 bus
->active_list_last_process_ts
= OSL_SYSUPTIME();
/* scan the nodes in active list till it finds a non idle node */
void
dhd_bus_idle_scan(dhd_bus_t *bus)
{
	dll_t *item, *prev;
	flow_ring_node_t *flow_ring_node;
	uint64 time_stamp, diff;
	unsigned long flags;
	uint16 ringid[MAX_SUSPEND_REQ];
	uint16 count = 0;

	time_stamp = OSL_SYSUPTIME();
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);

	/* Walk tail -> head: the list is kept most-recently-active-first, so
	 * the stalest rings are at the tail and the scan can stop at the
	 * first non-idle node.
	 */
	for (item = dll_tail_p(&bus->flowring_active_list);
		!dll_end(&bus->flowring_active_list, item); item = prev) {
		prev = dll_prev_p(item);

		flow_ring_node = dhd_constlist_to_flowring(item);

		/* Skip the last submission ring.
		 * NOTE(review): presumably the debug-info ring, which must not
		 * be suspended — confirm against ring layout.
		 */
		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
			continue;

		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
			/* Takes care of deleting zombie rings */
			/* delete from the active list */
			DHD_INFO(("deleting flow id %u from active list\n",
				flow_ring_node->flowid));
			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
			continue;
		}

		diff = time_stamp - flow_ring_node->last_active_ts;

		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
			/* delete from the active list */
			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
			ringid[count] = flow_ring_node->flowid;
			count++;
			if (count == MAX_SUSPEND_REQ) {
				/* create a batch message now!! */
				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
				count = 0;
			}

		} else {

			/* No more scanning, break from here! */
			break;
		}
	}

	/* Flush any suspend requests not yet batched out */
	if (count) {
		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
	}

	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

	return;
}
8913 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
8915 unsigned long flags
;
8918 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
8919 /* check if the node is already at head, otherwise delete it and prepend */
8920 list
= dll_head_p(&bus
->flowring_active_list
);
8921 if (&flow_ring_node
->list
!= list
) {
8922 dll_delete(&flow_ring_node
->list
);
8923 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
8926 /* update flow ring timestamp */
8927 flow_ring_node
->last_active_ts
= OSL_SYSUPTIME();
8929 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8934 void dhd_flow_ring_add_to_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
8936 unsigned long flags
;
8938 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
8940 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
8941 /* update flow ring timestamp */
8942 flow_ring_node
->last_active_ts
= OSL_SYSUPTIME();
8944 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8948 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
8950 dll_delete(&flow_ring_node
->list
);
8953 void dhd_flow_ring_delete_from_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
8955 unsigned long flags
;
8957 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
8959 __dhd_flow_ring_delete_from_active_list(bus
, flow_ring_node
);
8961 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
8965 #endif /* IDLE_TX_FLOW_MGMT */
/* Start the host-side PCIe clock for this bus; returns the platform result. */
int
dhdpcie_bus_clock_start(struct dhd_bus *bus)
{
	return dhdpcie_start_host_pcieclock(bus);
}
/* Stop the host-side PCIe clock for this bus; returns the platform result. */
int
dhdpcie_bus_clock_stop(struct dhd_bus *bus)
{
	return dhdpcie_stop_host_pcieclock(bus);
}
/* Disable the PCIe device backing this bus (thin platform wrapper). */
int
dhdpcie_bus_disable_device(struct dhd_bus *bus)
{
	return dhdpcie_disable_device(bus);
}
/* Enable the PCIe device backing this bus (thin platform wrapper). */
int
dhdpcie_bus_enable_device(struct dhd_bus *bus)
{
	return dhdpcie_enable_device(bus);
}
/* Allocate PCIe BAR/IRQ resources for the bus (thin platform wrapper). */
int
dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
{
	return dhdpcie_alloc_resource(bus);
}
/* Release PCIe resources previously obtained via dhdpcie_bus_alloc_resource(). */
void
dhdpcie_bus_free_resource(struct dhd_bus *bus)
{
	dhdpcie_free_resource(bus);
}
/* Request the bus interrupt line (thin wrapper over the PCIe layer). */
int
dhd_bus_request_irq(struct dhd_bus *bus)
{
	return dhdpcie_bus_request_irq(bus);
}
/* Attach/identify the dongle on this bus (thin wrapper). */
int
dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
{
	return dhdpcie_dongle_attach(bus);
}
9016 dhd_bus_release_dongle(struct dhd_bus
*bus
)
9018 bool dongle_isolation
;
9021 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
9028 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
9030 #endif /* DEBUGGER || DHD_DSCOPE */
9032 dongle_isolation
= bus
->dhd
->dongle_isolation
;
9033 dhdpcie_bus_release_dongle(bus
, osh
, dongle_isolation
, TRUE
);
9041 dhdpcie_cto_init(struct dhd_bus
*bus
, bool enable
)
9046 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4,
9047 PCI_CTO_INT_MASK
| PCI_SBIM_MASK_SERR
);
9048 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9049 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
| SPROM_BACKPLANE_EN
);
9050 if (bus
->cto_threshold
== 0) {
9051 bus
->cto_threshold
= PCIE_CTO_TO_THRESH_DEFAULT
;
9054 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9055 OFFSETOF(sbpcieregs_t
, ctoctrl
), ~0,
9056 ((bus
->cto_threshold
<< PCIE_CTO_TO_THRESHOLD_SHIFT
) &
9057 PCIE_CTO_TO_THRESHHOLD_MASK
) |
9058 ((PCIE_CTO_CLKCHKCNT_VAL
<< PCIE_CTO_CLKCHKCNT_SHIFT
) &
9059 PCIE_CTO_CLKCHKCNT_MASK
) |
9060 PCIE_CTO_ENAB_MASK
);
9062 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4, 0);
9063 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9064 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
& ~SPROM_BACKPLANE_EN
);
9066 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9067 OFFSETOF(sbpcieregs_t
, ctoctrl
), ~0, 0);
9072 dhdpcie_cto_error_recovery(struct dhd_bus
*bus
)
9074 uint32 pci_intmask
, err_status
, dar_val
;
9078 pci_intmask
= dhdpcie_bus_cfg_read_dword(bus
, PCI_INT_MASK
, 4);
9079 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4, pci_intmask
& ~PCI_CTO_INT_MASK
);
9081 DHD_OS_WAKE_LOCK(bus
->dhd
);
9083 DHD_ERROR(("--- CTO Triggered --- %d\n", bus
->pwr_req_ref
));
9086 * DAR still accessible
9088 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9089 DAR_CLK_CTRL(bus
->sih
->buscorerev
), 0, 0);
9090 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_CLK_CTRL(bus
->sih
->buscorerev
), dar_val
));
9092 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9093 DAR_PCIE_PWR_CTRL(bus
->sih
->buscorerev
), 0, 0);
9094 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_PCIE_PWR_CTRL(bus
->sih
->buscorerev
), dar_val
));
9096 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9097 DAR_INTSTAT(bus
->sih
->buscorerev
), 0, 0);
9098 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_INTSTAT(bus
->sih
->buscorerev
), dar_val
));
9100 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9101 DAR_ERRLOG(bus
->sih
->buscorerev
), 0, 0);
9102 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_ERRLOG(bus
->sih
->buscorerev
), dar_val
));
9104 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9105 DAR_ERRADDR(bus
->sih
->buscorerev
), 0, 0);
9106 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_ERRADDR(bus
->sih
->buscorerev
), dar_val
));
9108 dar_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9109 DAR_PCIMailBoxInt(bus
->sih
->buscorerev
), 0, 0);
9110 DHD_ERROR((" 0x%x:0x%x\n", (uint32
) DAR_PCIMailBoxInt(bus
->sih
->buscorerev
), dar_val
));
9112 /* reset backplane */
9113 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9114 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
| SPROM_CFG_TO_SB_RST
);
9116 /* clear timeout error */
9118 err_status
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9119 DAR_ERRLOG(bus
->sih
->buscorerev
),
9121 if (err_status
& PCIE_CTO_ERR_MASK
) {
9122 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9123 DAR_ERRLOG(bus
->sih
->buscorerev
),
9124 ~0, PCIE_CTO_ERR_MASK
);
9128 OSL_DELAY(CTO_TO_CLEAR_WAIT_MS
* 1000);
9130 if (i
> CTO_TO_CLEAR_WAIT_MAX_CNT
) {
9131 DHD_ERROR(("cto recovery fail\n"));
9133 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
9138 /* clear interrupt status */
9139 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_STATUS
, 4, PCI_CTO_INT_MASK
);
9141 /* Halt ARM & remove reset */
9142 /* TBD : we can add ARM Halt here in case */
9144 /* reset SPROM_CFG_TO_SB_RST */
9145 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9147 DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9148 PCI_SPROM_CONTROL
, SPROM_CFG_TO_SB_RST
, val
));
9149 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
& ~SPROM_CFG_TO_SB_RST
);
9151 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
9152 DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
9153 PCI_SPROM_CONTROL
, SPROM_CFG_TO_SB_RST
, val
));
9155 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
9159 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus
*bus
)
9163 val
= dhdpcie_bus_cfg_read_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4);
9164 dhdpcie_bus_cfg_write_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4,
9165 val
| (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT
));
9168 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
9170 dhdpcie_init_d11status(struct dhd_bus
*bus
)
9176 if (bus
->pcie_sh
->flags2
& PCIE_SHARED2_D2H_D11_TX_STATUS
) {
9177 flags2
= bus
->pcie_sh
->flags2
;
9178 addr
= bus
->shared_addr
+ OFFSETOF(pciedev_shared_t
, flags2
);
9179 flags2
|= PCIE_SHARED2_H2D_D11_TX_STATUS
;
9180 ret
= dhdpcie_bus_membytes(bus
, TRUE
, addr
,
9181 (uint8
*)&flags2
, sizeof(flags2
));
9183 DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
9187 bus
->pcie_sh
->flags2
= flags2
;
9188 bus
->dhd
->d11_tx_status
= TRUE
;
9195 dhdpcie_init_d11status(struct dhd_bus
*bus
)
9199 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
9201 #ifdef BCMPCIE_OOB_HOST_WAKE
9203 dhd_bus_oob_intr_register(dhd_pub_t
*dhdp
)
9205 return dhdpcie_oob_intr_register(dhdp
->bus
);
9209 dhd_bus_oob_intr_unregister(dhd_pub_t
*dhdp
)
9211 dhdpcie_oob_intr_unregister(dhdp
->bus
);
9215 dhd_bus_oob_intr_set(dhd_pub_t
*dhdp
, bool enable
)
9217 dhdpcie_oob_intr_set(dhdp
->bus
, enable
);
9219 #endif /* BCMPCIE_OOB_HOST_WAKE */
9222 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t
*bus
)
9224 return bus
->dhd
->d2h_hostrdy_supported
;
9228 dhd_pcie_dump_core_regs(dhd_pub_t
* pub
, uint32 index
, uint32 first_addr
, uint32 last_addr
)
9230 dhd_bus_t
*bus
= pub
->bus
;
9231 uint32 coreoffset
= index
<< 12;
9232 uint32 core_addr
= SI_ENUM_BASE(bus
->sih
) + coreoffset
;
9235 while (first_addr
<= last_addr
) {
9236 core_addr
= SI_ENUM_BASE(bus
->sih
) + coreoffset
+ first_addr
;
9237 if (si_backplane_access(bus
->sih
, core_addr
, 4, &value
, TRUE
) != BCME_OK
) {
9238 DHD_ERROR(("Invalid size/addr combination \n"));
9240 DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr
, value
));
9241 first_addr
= first_addr
+ 4;
9246 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t
*bus
)
9250 else if (bus
->idma_enabled
) {
9251 return bus
->dhd
->idma_enable
;
9258 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t
*bus
)
9262 else if (bus
->ifrm_enabled
) {
9263 return bus
->dhd
->ifrm_enable
;
9270 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t
*bus
)
9274 } else if (bus
->dar_enabled
) {
9275 return bus
->dhd
->dar_enable
;
9282 dhdpcie_bus_enab_pcie_dw(dhd_bus_t
*bus
, uint8 dw_option
)
9284 DHD_ERROR(("ENABLING DW:%d\n", dw_option
));
9285 bus
->dw_option
= dw_option
;
9289 dhd_bus_dump_trap_info(dhd_bus_t
*bus
, struct bcmstrbuf
*strbuf
)
9291 trap_t
*tr
= &bus
->dhd
->last_trap_info
;
9293 "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
9294 " lp 0x%x, rpc 0x%x"
9295 "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
9296 "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
9297 "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
9298 ltoh32(tr
->type
), ltoh32(tr
->epc
), ltoh32(tr
->cpsr
), ltoh32(tr
->spsr
),
9299 ltoh32(tr
->r13
), ltoh32(tr
->r14
), ltoh32(tr
->pc
),
9300 ltoh32(bus
->pcie_sh
->trap_addr
),
9301 ltoh32(tr
->r0
), ltoh32(tr
->r1
), ltoh32(tr
->r2
), ltoh32(tr
->r3
),
9302 ltoh32(tr
->r4
), ltoh32(tr
->r5
), ltoh32(tr
->r6
), ltoh32(tr
->r7
),
9303 ltoh32(tr
->r8
), ltoh32(tr
->r9
), ltoh32(tr
->r10
),
9304 ltoh32(tr
->r11
), ltoh32(tr
->r12
));
9308 dhd_bus_readwrite_bp_addr(dhd_pub_t
*dhdp
, uint addr
, uint size
, uint
* data
, bool read
)
9311 struct dhd_bus
*bus
= dhdp
->bus
;
9313 if (si_backplane_access(bus
->sih
, addr
, size
, data
, read
) != BCME_OK
) {
9314 DHD_ERROR(("Invalid size/addr combination \n"));
9315 bcmerror
= BCME_ERROR
;
9322 dhd_get_idletime(dhd_pub_t
*dhd
)
9324 return dhd
->bus
->idletime
;
9327 #ifdef DHD_SSSR_DUMP
9330 dhd_sbreg_op(dhd_pub_t
*dhd
, uint addr
, uint
*val
, bool read
)
9333 si_backplane_access(dhd
->bus
->sih
, addr
, sizeof(uint
), val
, read
);
9334 DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__
, addr
, *val
, read
));
9339 dhdpcie_get_sssr_fifo_dump(dhd_pub_t
*dhd
, uint
*buf
, uint fifo_size
,
9340 uint addr_reg
, uint data_reg
)
9346 DHD_ERROR(("%s\n", __FUNCTION__
));
9349 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__
));
9354 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__
));
9358 /* Set the base address offset to 0 */
9361 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9364 /* Read 4 bytes at once and loop for fifo_size / 4 */
9365 for (i
= 0; i
< fifo_size
/ 4; i
++) {
9366 si_backplane_access(dhd
->bus
->sih
, addr
, sizeof(uint
), &val
, TRUE
);
9374 dhdpcie_get_sssr_dig_dump(dhd_pub_t
*dhd
, uint
*buf
, uint fifo_size
,
9380 si_t
*sih
= dhd
->bus
->sih
;
9382 DHD_ERROR(("%s\n", __FUNCTION__
));
9385 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__
));
9390 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__
));
9396 if ((!dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) &&
9397 dhd
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) {
9398 dhdpcie_bus_membytes(dhd
->bus
, FALSE
, addr_reg
, (uint8
*)buf
, fifo_size
);
9400 /* Check if vasip clk is disabled, if yes enable it */
9401 addr
= dhd
->sssr_reg_info
.vasip_regs
.wrapper_regs
.ioctrl
;
9402 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9405 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9409 /* Read 4 bytes at once and loop for fifo_size / 4 */
9410 for (i
= 0; i
< fifo_size
/ 4; i
++, addr
+= 4) {
9411 si_backplane_access(sih
, addr
, sizeof(uint
), &val
, TRUE
);
9419 chipcregs_t
*chipcregs
;
9421 /* Save the current core */
9422 cur_coreid
= si_coreid(sih
);
9424 /* Switch to ChipC */
9425 chipcregs
= (chipcregs_t
*)si_setcore(sih
, CC_CORE_ID
, 0);
9427 chipc_corerev
= si_corerev(sih
);
9429 if (chipc_corerev
== 64) {
9430 W_REG(si_osh(sih
), &chipcregs
->sr_memrw_addr
, 0);
9432 /* Read 4 bytes at once and loop for fifo_size / 4 */
9433 for (i
= 0; i
< fifo_size
/ 4; i
++) {
9434 buf
[i
] = R_REG(si_osh(sih
), &chipcregs
->sr_memrw_data
);
9439 /* Switch back to the original core */
9440 si_setcore(sih
, cur_coreid
, 0);
9446 #if defined(BCMPCIE) && defined(DHD_LOG_DUMP)
9448 dhdpcie_get_etd_preserve_logs(dhd_pub_t
*dhd
,
9449 uint8
*ext_trap_data
, void *event_decode_data
)
9451 hnd_ext_trap_hdr_t
*hdr
= NULL
;
9453 eventlog_trapdata_info_t
*etd_evtlog
= NULL
;
9454 eventlog_trap_buf_info_t
*evtlog_buf_arr
= NULL
;
9460 if (!ext_trap_data
|| !event_decode_data
|| !dhd
)
9463 if (!dhd
->concise_dbg_buf
)
9466 /* First word is original trap_data, skip */
9467 ext_trap_data
+= sizeof(uint32
);
9469 hdr
= (hnd_ext_trap_hdr_t
*)ext_trap_data
;
9470 tlv
= bcm_parse_tlvs(hdr
->data
, hdr
->len
, TAG_TRAP_LOG_DATA
);
9472 uint32 baseaddr
= 0;
9473 uint32 endaddr
= dhd
->bus
->dongle_ram_base
+ dhd
->bus
->ramsize
- 4;
9475 etd_evtlog
= (eventlog_trapdata_info_t
*)tlv
->data
;
9476 DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
9477 "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__
,
9478 (etd_evtlog
->num_elements
),
9479 ntoh32(etd_evtlog
->seq_num
), (etd_evtlog
->log_arr_addr
)));
9480 arr_size
= (uint32
)sizeof(*evtlog_buf_arr
) * (etd_evtlog
->num_elements
);
9482 DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__
));
9485 evtlog_buf_arr
= MALLOCZ(dhd
->osh
, arr_size
);
9486 if (!evtlog_buf_arr
) {
9487 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__
));
9491 /* boundary check */
9492 baseaddr
= etd_evtlog
->log_arr_addr
;
9493 if ((baseaddr
< dhd
->bus
->dongle_ram_base
) ||
9494 ((baseaddr
+ arr_size
) > endaddr
)) {
9495 DHD_ERROR(("%s: Error reading invalid address\n",
9500 /* read the eventlog_trap_buf_info_t array from dongle memory */
9501 err
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
,
9502 (ulong
)(etd_evtlog
->log_arr_addr
),
9503 (uint8
*)evtlog_buf_arr
, arr_size
);
9504 if (err
!= BCME_OK
) {
9505 DHD_ERROR(("%s: Error reading event log array from dongle !\n",
9509 /* ntoh is required only for seq_num, because in the original
9510 * case of event logs from info ring, it is sent from dongle in that way
9511 * so for ETD also dongle follows same convention
9513 seqnum
= ntoh32(etd_evtlog
->seq_num
);
9514 memset(dhd
->concise_dbg_buf
, 0, CONCISE_DUMP_BUFLEN
);
9515 for (i
= 0; i
< (etd_evtlog
->num_elements
); ++i
) {
9516 /* boundary check */
9517 baseaddr
= evtlog_buf_arr
[i
].buf_addr
;
9518 if ((baseaddr
< dhd
->bus
->dongle_ram_base
) ||
9519 ((baseaddr
+ evtlog_buf_arr
[i
].len
) > endaddr
)) {
9520 DHD_ERROR(("%s: Error reading invalid address\n",
9524 /* read each individual event log buf from dongle memory */
9525 err
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
,
9526 ((ulong
)evtlog_buf_arr
[i
].buf_addr
),
9527 dhd
->concise_dbg_buf
, (evtlog_buf_arr
[i
].len
));
9528 if (err
!= BCME_OK
) {
9529 DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
9533 dhd_dbg_msgtrace_log_parser(dhd
, dhd
->concise_dbg_buf
,
9534 event_decode_data
, (evtlog_buf_arr
[i
].len
),
9535 FALSE
, hton32(seqnum
));
9539 MFREE(dhd
->osh
, evtlog_buf_arr
, arr_size
);
9541 DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__
));
9544 #endif /* BCMPCIE && DHD_LOG_DUMP */
9547 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t
*dhd
)
9552 DHD_ERROR(("%s\n", __FUNCTION__
));
9554 /* conditionally clear bits [11:8] of PowerCtrl */
9555 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9556 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9557 if (!(val
& dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl_mask
)) {
9558 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9559 val
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl_mask
;
9560 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9566 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t
*dhd
)
9571 DHD_ERROR(("%s\n", __FUNCTION__
));
9573 /* conditionally clear bits [11:8] of PowerCtrl */
9574 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9575 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9576 if (val
& dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl_mask
) {
9577 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.powerctrl
;
9579 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9585 dhdpcie_clear_intmask_and_timer(dhd_pub_t
*dhd
)
9590 DHD_ERROR(("%s\n", __FUNCTION__
));
9592 /* clear chipcommon intmask */
9593 addr
= dhd
->sssr_reg_info
.chipcommon_regs
.base_regs
.intmask
;
9595 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9597 /* clear PMUIntMask0 */
9598 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.pmuintmask0
;
9600 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9602 /* clear PMUIntMask1 */
9603 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.pmuintmask1
;
9605 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9607 /* clear res_req_timer */
9608 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.resreqtimer
;
9610 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9612 /* clear macresreqtimer */
9613 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.macresreqtimer
;
9615 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9617 /* clear macresreqtimer1 */
9618 addr
= dhd
->sssr_reg_info
.pmu_regs
.base_regs
.macresreqtimer1
;
9620 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9622 /* clear VasipClkEn */
9623 if (dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
9624 addr
= dhd
->sssr_reg_info
.vasip_regs
.wrapper_regs
.ioctrl
;
9626 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9633 dhdpcie_d11_check_outofreset(dhd_pub_t
*dhd
)
9639 DHD_ERROR(("%s\n", __FUNCTION__
));
9641 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9642 /* Check if bit 0 of resetctrl is cleared */
9643 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.resetctrl
;
9645 DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
9647 /* ignore invalid address */
9648 dhd
->sssr_d11_outofreset
[i
] = FALSE
;
9651 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9653 dhd
->sssr_d11_outofreset
[i
] = TRUE
;
9655 dhd
->sssr_d11_outofreset
[i
] = FALSE
;
9657 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
9658 __FUNCTION__
, i
, dhd
->sssr_d11_outofreset
[i
]));
9664 dhdpcie_d11_clear_clk_req(dhd_pub_t
*dhd
)
9670 DHD_ERROR(("%s\n", __FUNCTION__
));
9672 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9673 if (dhd
->sssr_d11_outofreset
[i
]) {
9674 /* clear request clk only if itopoobb is non zero */
9675 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.itopoobb
;
9676 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9678 /* clear clockcontrolstatus */
9679 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.clockcontrolstatus
;
9681 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.clockcontrolstatus_val
;
9682 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9690 dhdpcie_arm_clear_clk_req(dhd_pub_t
*dhd
)
9695 DHD_ERROR(("%s\n", __FUNCTION__
));
9697 /* Check if bit 0 of resetctrl is cleared */
9698 addr
= dhd
->sssr_reg_info
.arm_regs
.wrapper_regs
.resetctrl
;
9699 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9701 /* clear request clk only if itopoobb is non zero */
9702 addr
= dhd
->sssr_reg_info
.arm_regs
.wrapper_regs
.itopoobb
;
9703 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9705 /* clear clockcontrolstatus */
9706 addr
= dhd
->sssr_reg_info
.arm_regs
.base_regs
.clockcontrolstatus
;
9707 val
= dhd
->sssr_reg_info
.arm_regs
.base_regs
.clockcontrolstatus_val
;
9708 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9715 dhdpcie_pcie_clear_clk_req(dhd_pub_t
*dhd
)
9720 DHD_ERROR(("%s\n", __FUNCTION__
));
9722 /* clear request clk only if itopoobb is non zero */
9723 addr
= dhd
->sssr_reg_info
.pcie_regs
.wrapper_regs
.itopoobb
;
9724 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
9726 /* clear clockcontrolstatus */
9727 addr
= dhd
->sssr_reg_info
.pcie_regs
.base_regs
.clockcontrolstatus
;
9728 val
= dhd
->sssr_reg_info
.pcie_regs
.base_regs
.clockcontrolstatus_val
;
9729 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9735 dhdpcie_pcie_send_ltrsleep(dhd_pub_t
*dhd
)
9740 DHD_ERROR(("%s\n", __FUNCTION__
));
9742 addr
= dhd
->sssr_reg_info
.pcie_regs
.base_regs
.ltrstate
;
9744 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9747 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9753 dhdpcie_clear_clk_req(dhd_pub_t
*dhd
)
9755 DHD_ERROR(("%s\n", __FUNCTION__
));
9757 dhdpcie_arm_clear_clk_req(dhd
);
9759 dhdpcie_d11_clear_clk_req(dhd
);
9761 dhdpcie_pcie_clear_clk_req(dhd
);
9767 dhdpcie_bring_d11_outofreset(dhd_pub_t
*dhd
)
9773 DHD_ERROR(("%s\n", __FUNCTION__
));
9775 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9776 if (dhd
->sssr_d11_outofreset
[i
]) {
9777 /* disable core by setting bit 0 */
9778 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.resetctrl
;
9780 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9783 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.ioctrl
;
9784 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[0];
9785 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9787 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[1];
9788 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9790 /* enable core by clearing bit 0 */
9791 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.resetctrl
;
9793 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9795 addr
= dhd
->sssr_reg_info
.mac_regs
[i
].wrapper_regs
.ioctrl
;
9796 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[2];
9797 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9799 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[3];
9800 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9802 val
= dhd
->sssr_reg_info
.mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[4];
9803 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
9810 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t
*dhd
)
9814 DHD_ERROR(("%s\n", __FUNCTION__
));
9816 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9817 if (dhd
->sssr_d11_outofreset
[i
]) {
9818 dhdpcie_get_sssr_fifo_dump(dhd
, dhd
->sssr_d11_before
[i
],
9819 dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
,
9820 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtaddress
,
9821 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtdata
);
9825 if (dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
9826 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_before
,
9827 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
,
9828 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_addr
);
9829 } else if ((dhd
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) &&
9830 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
) {
9831 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_before
,
9832 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_size
,
9833 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
);
9840 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t
*dhd
)
9844 DHD_ERROR(("%s\n", __FUNCTION__
));
9846 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
9847 if (dhd
->sssr_d11_outofreset
[i
]) {
9848 dhdpcie_get_sssr_fifo_dump(dhd
, dhd
->sssr_d11_after
[i
],
9849 dhd
->sssr_reg_info
.mac_regs
[i
].sr_size
,
9850 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtaddress
,
9851 dhd
->sssr_reg_info
.mac_regs
[i
].base_regs
.xmtdata
);
9855 if (dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
) {
9856 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_after
,
9857 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_size
,
9858 dhd
->sssr_reg_info
.vasip_regs
.vasip_sr_addr
);
9859 } else if ((dhd
->sssr_reg_info
.length
> OFFSETOF(sssr_reg_info_v1_t
, dig_mem_info
)) &&
9860 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
) {
9861 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_after
,
9862 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_size
,
9863 dhd
->sssr_reg_info
.dig_mem_info
.dig_sr_addr
);
9870 dhdpcie_sssr_dump(dhd_pub_t
*dhd
)
9872 if (!dhd
->sssr_inited
) {
9873 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__
));
9877 if (dhd
->bus
->is_linkdown
) {
9878 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
9882 dhdpcie_d11_check_outofreset(dhd
);
9884 DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__
));
9885 if (dhdpcie_sssr_dump_get_before_sr(dhd
) != BCME_OK
) {
9886 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__
));
9890 dhdpcie_clear_intmask_and_timer(dhd
);
9891 dhdpcie_suspend_chipcommon_powerctrl(dhd
);
9892 dhdpcie_clear_clk_req(dhd
);
9893 dhdpcie_pcie_send_ltrsleep(dhd
);
9895 /* Wait for some time before Restore */
9898 dhdpcie_resume_chipcommon_powerctrl(dhd
);
9899 dhdpcie_bring_d11_outofreset(dhd
);
9901 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__
));
9902 if (dhdpcie_sssr_dump_get_after_sr(dhd
) != BCME_OK
) {
9903 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__
));
9907 dhd_schedule_sssr_dump(dhd
);
9913 dhd_bus_sssr_dump(dhd_pub_t
*dhd
)
9915 return dhdpcie_sssr_dump(dhd
);
9917 #endif /* DHD_SSSR_DUMP */
9919 #ifdef DHD_WAKE_STATUS
9921 dhd_bus_get_wakecount(dhd_pub_t
*dhd
)
9923 return &dhd
->bus
->wake_counts
;
9926 dhd_bus_get_bus_wake(dhd_pub_t
*dhd
)
9928 return bcmpcie_set_get_wake(dhd
->bus
, 0);
9930 #endif /* DHD_WAKE_STATUS */
9932 #define OTP_ADDRESS (SI_ENUM_BASE_DEFAULT + CC_SROM_OTP)
9933 #define OTP_USER_AREA_OFFSET 0x80
9934 #define OTP_USER_AREA_ADDR (OTP_ADDRESS + OTP_USER_AREA_OFFSET)
9935 #define OTP_VERSION_TUPLE_ID 0x15
9936 #define OTP_VENDOR_TUPLE_ID 0x80
9937 #define OTP_CIS_REGION_END_TUPLE_ID 0XFF
9938 #define PMU_RES_STATE_REG_ADDR (SI_ENUM_BASE_DEFAULT + PMU_RES_STATE)
9939 #define PMU_MINRESMASK_REG_ADDR (SI_ENUM_BASE_DEFAULT + MINRESMASKREG)
9940 #define OTP_CTRL1_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0xF4)
9941 #define SPROM_CTRL_REG_ADDR (SI_ENUM_BASE_DEFAULT + CC_SROM_CTRL)
9942 #define CHIP_COMMON_STATUS_REG_ADDR (SI_ENUM_BASE_DEFAULT + 0x2C)
9943 #define PMU_OTP_PWR_ON_MASK 0xC47
9946 dhdpcie_get_nvpath_otp(dhd_bus_t
*bus
, char* program
, char *nv_path
)
9952 char module_name
[5];
9953 char module_vendor
= 0;
9956 uint8 tuple_len
= 0;
9957 uint32 cur_offset
= 0;
9958 uint32 version_tuple_offset
= 0;
9959 char module_info
[64];
9961 bool srom_present
= 0, otp_present
= 0;
9962 uint32 sprom_ctrl
= 0;
9963 uint32 otp_ctrl
= 0, minres_mask
= 0;
9964 int i
= 0, j
= 0, status
= BCME_ERROR
;
9966 if (!nv_path
|| !bus
) {
9970 /* read chip id first */
9971 if (si_backplane_access(bus
->sih
, SI_ENUM_BASE_DEFAULT
, 4, &val
, TRUE
) != BCME_OK
) {
9972 DHD_ERROR(("%s: bkplane access error ! \n", __FUNCTION__
));
9975 chip_id
= val
& 0xffff;
9978 /* read SpromCtrl register */
9979 si_backplane_access(bus
->sih
, SPROM_CTRL_REG_ADDR
, 4, &sprom_ctrl
, TRUE
);
9982 /* proceed only if OTP is present - i.e, the 5th bit OtpPresent is set
9983 * and chip is 4355 or 4364
9985 if ((val
& 0x20) && (chip_id
== 0x4355 || chip_id
== 0x4364)) {
9988 /* Check if the 4th bit (sprom_present) in CC Status REG is set */
9989 si_backplane_access(bus
->sih
, CHIP_COMMON_STATUS_REG_ADDR
, 4, &val
, TRUE
);
9994 /* OTP power up sequence */
9995 /* 1. cache otp ctrl and enable OTP clock through OTPCtrl1 register */
9996 si_backplane_access(bus
->sih
, OTP_CTRL1_REG_ADDR
, 4, &otp_ctrl
, TRUE
);
9998 si_backplane_access(bus
->sih
, OTP_CTRL1_REG_ADDR
, 4, &val
, FALSE
);
10000 /* 2. enable OTP power through min res mask register in PMU */
10001 si_backplane_access(bus
->sih
, PMU_MINRESMASK_REG_ADDR
, 4, &minres_mask
, TRUE
);
10002 val
= minres_mask
| PMU_OTP_PWR_ON_MASK
;
10003 si_backplane_access(bus
->sih
, PMU_MINRESMASK_REG_ADDR
, 4, &val
, FALSE
);
10005 /* 3. if srom is present, need to set OtpSelect 4th bit
10006 * in SpromCtrl register to read otp
10008 if (srom_present
) {
10010 val
= sprom_ctrl
| 0x10;
10011 si_backplane_access(bus
->sih
, SPROM_CTRL_REG_ADDR
, 4, &val
, FALSE
);
10014 /* Wait for PMU to power up. */
10016 si_backplane_access(bus
->sih
, PMU_RES_STATE_REG_ADDR
, 4, &val
, TRUE
);
10017 DHD_INFO(("%s: PMU_RES_STATE_REG_ADDR %x \n", __FUNCTION__
, val
));
10019 si_backplane_access(bus
->sih
, SI_ENUM_BASE_DEFAULT
, 4, &val
, TRUE
);
10020 DHD_INFO(("%s: _SI_ENUM_BASE %x \n", __FUNCTION__
, val
));
10022 si_backplane_access(bus
->sih
, OTP_ADDRESS
, 2, &val
, TRUE
);
10023 DHD_INFO(("%s: OTP_ADDRESS %x \n", __FUNCTION__
, val
));
10025 cur_offset
= OTP_USER_AREA_ADDR
+ 0x40;
10026 /* read required data from otp to construct FW string name
10027 * data like - chip info, module info. This is present in the
10028 * form of a Vendor CIS Tuple whose format is provided by Olympic.
10029 * The data is in the form of ASCII character strings.
10030 * The Vendor tuple along with other CIS tuples are present
10031 * in the OTP user area. A CIS tuple is a TLV format.
10032 * (T = 1-byte, L = 1-byte, V = n-bytes)
10035 /* Find the version tuple */
10036 while (tuple_id
!= OTP_CIS_REGION_END_TUPLE_ID
) {
10037 si_backplane_access(bus
->sih
, cur_offset
,
10038 2, (uint
*)otp_data
, TRUE
);
10040 tuple_id
= otp_data
[0];
10041 tuple_len
= otp_data
[1];
10042 if (tuple_id
== OTP_VERSION_TUPLE_ID
) {
10043 version_tuple_offset
= cur_offset
;
10046 /* if its NULL tuple, skip */
10050 cur_offset
+= tuple_len
+ 2;
10053 /* skip the major, minor ver. numbers, manufacturer and product names */
10054 cur_offset
= version_tuple_offset
+ 6;
10056 /* read the chip info */
10057 si_backplane_access(bus
->sih
, cur_offset
,
10058 2, (uint
*)otp_data
, TRUE
);
10059 if (otp_data
[0] == 's' && otp_data
[1] == '=') {
10060 /* read the stepping */
10063 si_backplane_access(bus
->sih
, cur_offset
,
10064 2, (uint
*)stepping
, TRUE
);
10065 /* read module info */
10066 memset(module_info
, 0, 64);
10068 si_backplane_access(bus
->sih
, cur_offset
,
10069 2, (uint
*)otp_data
, TRUE
);
10070 while (otp_data
[0] != OTP_CIS_REGION_END_TUPLE_ID
&&
10071 otp_data
[1] != OTP_CIS_REGION_END_TUPLE_ID
) {
10072 memcpy(&module_info
[i
], otp_data
, 2);
10075 si_backplane_access(bus
->sih
, cur_offset
,
10076 2, (uint
*)otp_data
, TRUE
);
10078 /* replace any null characters found at the beginning
10079 * and middle of the string
10081 for (j
= 0; j
< i
; ++j
) {
10082 if (module_info
[j
] == 0)
10083 module_info
[j
] = ' ';
10085 DHD_ERROR(("OTP chip_info: s=%c%c; module info: %s \n",
10086 stepping
[0], stepping
[1], module_info
));
10087 /* extract the module name, revision and vendor
10088 * information from the module info string
10090 for (i
= 0; module_info
[i
]; i
++) {
10091 if (module_info
[i
] == 'M' && module_info
[i
+ 1] == '=') {
10092 memcpy(module_name
, &module_info
[i
+ 2], 4);
10093 module_name
[4] = 0;
10096 else if (module_info
[i
] == 'm' && module_info
[i
+ 1] == '=') {
10097 memcpy(module_rev
, &module_info
[i
+ 2], 3);
10101 else if (module_info
[i
] == 'V' && module_info
[i
+ 1] == '=') {
10102 module_vendor
= module_info
[i
+ 2];
10107 /* construct the complete file path to nvram as per
10108 * olympic conventions
10110 strncpy(progname
, program
, sizeof(progname
));
10111 sprintf(nv_path
, "P-%s_M-%s_V-%c__m-%s.txt", progname
, module_name
,
10112 module_vendor
, module_rev
);
10113 DHD_ERROR(("%s NVRAM path = %s\n", __FUNCTION__
, nv_path
));
10117 /* restore back the registers to their previous values */
10118 if (srom_present
) {
10119 si_backplane_access(bus
->sih
, SPROM_CTRL_REG_ADDR
, 4, &sprom_ctrl
, FALSE
);
10123 si_backplane_access(bus
->sih
, PMU_MINRESMASK_REG_ADDR
, 4,
10124 &minres_mask
, FALSE
);
10125 si_backplane_access(bus
->sih
, OTP_CTRL1_REG_ADDR
, 4, &otp_ctrl
, FALSE
);
10132 /* Writes random number(s) to the TCM. FW upon initialization reads this register
10133 * to fetch the random number, and uses it to randomize heap address space layout.
10136 dhdpcie_wrt_rnd(struct dhd_bus
*bus
)
10138 bcm_rand_metadata_t rnd_data
;
10139 uint8 rand_buf
[BCM_ENTROPY_HOST_NBYTES
];
10140 uint32 count
= BCM_ENTROPY_HOST_NBYTES
;
10142 uint32 addr
= bus
->dongle_ram_base
+ (bus
->ramsize
- BCM_NVRAM_OFFSET_TCM
) -
10143 ((bus
->nvram_csm
& 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR
+ sizeof(rnd_data
));
10145 memset(rand_buf
, 0, BCM_ENTROPY_HOST_NBYTES
);
10146 rnd_data
.signature
= htol32(BCM_NVRAM_RNG_SIGNATURE
);
10147 rnd_data
.count
= htol32(count
);
10148 /* write the metadata about random number */
10149 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&rnd_data
, sizeof(rnd_data
));
10150 /* scale back by number of random number counts */
10153 /* Now get & write the random number(s) */
10154 ret
= dhd_get_random_bytes(rand_buf
, count
);
10155 if (ret
!= BCME_OK
) {
10158 dhdpcie_bus_membytes(bus
, TRUE
, addr
, rand_buf
, count
);
10163 #ifdef D2H_MINIDUMP
10165 dhd_bus_is_minidump_enabled(dhd_pub_t
*dhdp
)
10167 return dhdp
->bus
->d2h_minidump
;
10169 #endif /* D2H_MINIDUMP */
/* Dump host-side interrupt enable/disable counters and the timestamps of
 * recent ISR/DPC/suspend/resume events for the PCIe bus.  Pure logging via
 * DHD_ERROR(); nothing is modified besides sampling the current time.
 * NOTE(review): counters are printed with %lu — assumes the bus fields are
 * unsigned long; confirm against the dhd_bus declaration.
 */
void
dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
{
	struct dhd_bus *bus = dhd->bus;
	uint64 current_time;

	DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
	DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
		bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
	DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
		bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
#ifdef BCMPCIE_OOB_HOST_WAKE
	/* Out-of-band host-wake interrupt statistics (only when OOB wake is built in) */
	DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
		bus->oob_intr_count, bus->oob_intr_enable_count,
		bus->oob_intr_disable_count));
	DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
		dhdpcie_get_oob_irq_num(bus),
		GET_SEC_USEC(bus->last_oob_irq_time)));
#endif /* BCMPCIE_OOB_HOST_WAKE */
	DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
		bus->dpc_return_busdown_count, bus->non_ours_irq_count));

	/* Timestamps below are nanosecond OSL local times rendered as sec.usec */
	current_time = OSL_LOCALTIME_NS();
	DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(current_time)));
	DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
		" isr_exit_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->isr_entry_time),
		GET_SEC_USEC(bus->isr_exit_time)));
	DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
		" last_non_ours_irq_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->dpc_sched_time),
		GET_SEC_USEC(bus->last_non_ours_irq_time)));
	DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
		" last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->dpc_entry_time),
		GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
	DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
		" last_process_txcpl_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_process_flowring_time),
		GET_SEC_USEC(bus->last_process_txcpl_time)));
	DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
		" last_process_infocpl_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_process_rxcpl_time),
		GET_SEC_USEC(bus->last_process_infocpl_time)));
	DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
		" resched_dpc_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->dpc_exit_time),
		GET_SEC_USEC(bus->resched_dpc_time)));
	DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_d3_inform_time)));

	DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
		" last_suspend_end_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_suspend_start_time),
		GET_SEC_USEC(bus->last_suspend_end_time)));
	DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
		" last_resume_end_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(bus->last_resume_start_time),
		GET_SEC_USEC(bus->last_resume_end_time)));
}
/* Bus-generic entry point: forwards to the PCIe-specific interrupt counter dump. */
void
dhd_bus_intr_count_dump(dhd_pub_t *dhd)
{
	dhd_pcie_intr_count_dump(dhd);
}
/* Dump the PCIe core's DMA engine registers via si_corereg():
 * HostToDev TX/RX at core offsets 0x200-0x234, DevToHost TX/RX at 0x240-0x274.
 * Logging only; skipped entirely when the PCIe link is down, because register
 * reads over a dead link would return garbage (or fault).
 */
void
dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
{
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
			"due to PCIe link down ------- \r\n"));
		return;
	}

	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));

	/* Host-to-device transmit engine */
	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));

	/* Host-to-device receive engine */
	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));

	/* Device-to-host transmit engine */
	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));

	/* Device-to-host receive engine */
	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
}
/* Dump the PCIe mailbox interrupt status/mask and D2H doorbell registers.
 * Returns FALSE as soon as any register reads back all-ones (0xffffffff),
 * which indicates the PCIe link/core is inaccessible; TRUE otherwise.
 * (Return type is bool — grounded by the visible caller
 * `if (!dhd_pcie_dump_int_regs(dhd))` in dhd_pcie_debug_info_dump.)
 */
bool
dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
{
	uint32 intstatus = 0;
	uint32 intmask = 0;
	uint32 d2h_db0 = 0;
	uint32 d2h_mb_data = 0;

	DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_int, 0, 0);
	if (intstatus == (uint32)-1) {
		/* all-ones read: link/core dead — abort further register access */
		DHD_ERROR(("intstatus=0x%x \n", intstatus));
		return FALSE;
	}

	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_mask, 0, 0);
	if (intmask == (uint32) -1) {
		DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
		return FALSE;
	}

	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		PCID2H_MailBox, 0, 0);
	if (d2h_db0 == (uint32)-1) {
		DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
			intstatus, intmask, d2h_db0));
		return FALSE;
	}

	DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
		intstatus, intmask, d2h_db0));
	/* Also report the shared-memory D2H mailbox word and default int mask */
	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
	DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
		dhd->bus->def_intmask));

	return TRUE;
}
/* Dump the PCIe Root Complex AER extended-capability registers: the
 * uncorrectable error status, and (with EXTENDED_PCIE_DEBUG_DUMP) the four
 * AER header-log words that capture the TLP header of the failing request.
 * Logging only; reads go through dhdpcie_rc_access_cap() (read=TRUE, no write).
 */
void
dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
{
	DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
	DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
	DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
}
/* Top-level PCIe debug dump: host IRQ state, interrupt counters, EP resources,
 * RC and EP config-space/AER registers, DAR registers (core rev >= 24),
 * mailbox interrupt registers, LTSSM history and error logs, finishing with
 * the DMA engine registers.  Logging only.
 * NOTE(review): a few continuation lines of the original were lost in
 * extraction and are reconstructed here (e.g. the early `return`s and some
 * argument lines); verify against the upstream dhd_pcie.c before relying on
 * exact output formatting.
 */
void
dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
{
	int host_irq_disabled;

	DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
	host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
	DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
	dhd_print_tasklet_status(dhd);
	dhd_pcie_intr_count_dump(dhd);

	DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
	dhdpcie_dump_resource(dhd->bus);

	dhd_pcie_dump_rc_conf_space_cap(dhd);

	DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
		dhd_debug_get_rc_linkcap(dhd->bus)));

#ifdef CUSTOMER_HW4_DEBUG
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("Skip dumping the PCIe registers due to PCIe Link down\n"));
		return;
	}
#endif /* CUSTOMER_HW4_DEBUG */

	DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
	DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
		"PCIE_CFG_PMCSR(0x%x)=0x%x\n",
		PCIECFGREG_STATUS_CMD,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
		PCIECFGREG_BASEADDR0,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
		PCIECFGREG_BASEADDR1,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
		PCIE_CFG_PMCSR,
		dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
	DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
		"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
		sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
		sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
		sizeof(uint32))));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
	/* Endpoint AER status and TLP header logs */
	DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
		dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
	DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
		"hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
		PCI_TLP_HDR_LOG2,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
		PCI_TLP_HDR_LOG3,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
		PCI_TLP_HDR_LOG4,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
			"L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
			sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
			dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
			sizeof(uint32))));

		/* DAR (Direct Access Register) window — core rev >= 24 only */
		DHD_ERROR(("\n ------- DUMPING PCIE DAR Registers ------- \r\n"));
		DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
			PCIDARClkCtl(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIDARClkCtl(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARPwrCtl(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIDARPwrCtl(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARH2D_DB0(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIDARH2D_DB0(dhd->bus->sih->buscorerev), 0, 0)));
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	if (!dhd_pcie_dump_int_regs(dhd)) {
		DHD_ERROR(("Skip dumping the PCIe Core registers due to invalid int regs\n"));
		return;
	}

	DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x\n",
			PCIDARErrlog(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIDARErrlog(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARErrlog_Addr(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIDARErrlog_Addr(dhd->bus->sih->buscorerev), 0, 0)));
		DHD_ERROR(("FunctionINtstatus(0x%x)=0x%x, Mailboxint(0x%x)=0x%x\n",
			PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIDARFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIDARMailboxint(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIDARMailboxint(dhd->bus->sih->buscorerev), 0, 0)));
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	/* PHY clock-request debug registers */
	DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
		"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
		PCIECFGREG_PHY_DBG_CLKREQ1,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
		PCIECFGREG_PHY_DBG_CLKREQ2,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
		PCIECFGREG_PHY_DBG_CLKREQ3,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
	if (dhd->bus->sih->buscorerev >= 24) {
		/* LTSSM (link training state machine) history */
		DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
			"ltssm_hist_2(0x%x)=0x%x "
			"ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
			PCIECFGREG_PHY_LTSSM_HIST_1,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
			PCIECFGREG_PHY_LTSSM_HIST_2,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
			PCIECFGREG_PHY_LTSSM_HIST_3,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
		DHD_ERROR(("clkctl(0x%x)=0x%x pwrctl(0x%x)=0x%x H2D_DB0(0x%x)=0x%x\n",
			PCIE_CLK_CTRL,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_CLK_CTRL, 0, 0),
			PCIE_PWR_CTRL,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIE_PWR_CTRL, 0, 0),
			PCIH2D_MailBox,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIH2D_MailBox, 0, 0)));
		DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
			PCIECFGREG_TREFUP,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
			PCIECFGREG_TREFUP_EXT,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
			"Function_Intstatus(0x%x)=0x%x "
			"Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
			"Power_Intmask(0x%x)=0x%x\n",
			PCIE_CORE_REG_ERRLOG,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIE_CORE_REG_ERRLOG, 0, 0),
			PCIE_CORE_REG_ERR_ADDR,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIE_CORE_REG_ERR_ADDR, 0, 0),
			PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIFunctionIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
		DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
			"err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
		DHD_ERROR(("err_code(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	dhd_pcie_dma_info_dump(dhd);
}
/*
 * TLV ID for Host whitelist Region.
 */
#define BCM_NVRAM_WHTLST_SIGNATURE	0xFEED4B1Du

/*
 * For the time being only one whitelist region supported and 64 Bit high and
 * 64 bit low address should be written.
 */
#define BCM_HOST_WHITELIST_NBYTES	16u

/* Writes host whitelist region to the TCM. FW upon initialization reads this region
 * to fetch whitelist regions, and validate DMA descriptors before programming
 * against these whitelist regions.
 *
 * Layout mirrors dhdpcie_wrt_rnd(): the whitelist block sits below the
 * compressed NVRAM image and the entropy block in dongle RAM.
 * Returns BCME_OK on success (including "no region programmed"), else error.
 */
static int
dhdpcie_wrt_host_whitelist_region(struct dhd_bus *bus)
{
	int ret;
	bcm_host_whitelist_metadata_t whitelist_data;
	uint8 whtlst_buff[BCM_HOST_WHITELIST_NBYTES];
	bcm_rand_metadata_t rnd_data;
	uint32 addr = bus->dongle_ram_base + (uint32)((bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
		((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data) +
		BCM_ENTROPY_HOST_NBYTES + sizeof(whitelist_data)));

	whitelist_data.signature = htol32(BCM_NVRAM_WHTLST_SIGNATURE);
	whitelist_data.count = htol32(BCM_HOST_WHITELIST_NBYTES);
	ret = dhd_get_host_whitelist_region((void*)whtlst_buff,
		whitelist_data.count);
	if (ret == BCME_RANGE) {
		DHD_INFO(("%s: No Whitelist region programmed !\n",
			__FUNCTION__));
		/* NOTE(review): this branch was partially lost in extraction; it is
		 * reconstructed as "no region configured is not an error, write
		 * nothing" — confirm against the upstream source.
		 */
		return BCME_OK;
	}
	if (ret == BCME_OK) {
		/* write the metadata about whitelist region */
		ret = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&whitelist_data,
			sizeof(whitelist_data));
		if (ret == BCME_OK) {
			/* scale back by number of whitelist region counts */
			addr -= BCM_HOST_WHITELIST_NBYTES;

			/* Now write whitelist region(s) */
			ret = dhdpcie_bus_membytes(bus, TRUE, addr, whtlst_buff,
				BCM_HOST_WHITELIST_NBYTES);
		}
	}
	return ret;
}
/* Report whether BT quiesce is being forced on this bus.
 * Presumably returns bool — the declared return type and closing brace fall
 * outside this chunk; confirm against the header prototype.
 */
bool
dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
{
	return bus->force_bt_quiesce;
}