2 * DHD Bus Module for PCIE
4 * Copyright (C) 2020, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
21 * <<Broadcom-WL-IPTag/Open:>>
26 /** XXX Twiki: [PCIeFullDongleArchitecture] */
33 #include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
39 #include <hnd_debug.h>
42 #include <hnd_armtrap.h>
43 #if defined(DHD_DEBUG)
45 #endif /* defined(DHD_DEBUG) */
46 #include <dngl_stats.h>
47 #include <pcie_core.h>
50 #include <dhd_flowring.h>
51 #include <dhd_proto.h>
53 #include <dhd_debug.h>
54 #include <dhd_daemon.h>
57 #include <bcmmsgbuf.h>
61 #include <bcmendian.h>
62 #include <bcmstdlib_s.h>
63 #ifdef DHDTCPACK_SUPPRESS
65 #endif /* DHDTCPACK_SUPPRESS */
67 #include <dhd_config.h>
69 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
70 #include <linux/pm_runtime.h>
71 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
73 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
75 #endif /* DEBUGGER || DHD_DSCOPE */
77 #if defined(FW_SIGNATURE)
78 #include <dngl_rtlv.h>
79 #include <bcm_fwsign.h>
80 #endif /* FW_SIGNATURE */
82 #ifdef DNGL_AXI_ERROR_LOGGING
83 #include <dhd_linux_wq.h>
84 #include <dhd_linux.h>
85 #endif /* DNGL_AXI_ERROR_LOGGING */
87 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
88 #include <dhd_linux_priv.h>
89 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
91 #define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
93 #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
94 #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
/* Item counts for the high-priority (HPP) tx/rx completion rings */
96 #define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
97 #define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
98 #define MAX_HP2P_CMPL_RINGS 2u
100 /* XXX defines for 4378 */
/* ARM CR4 core register word offsets (byte offset / sizeof(uint32)) */
101 #define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
102 #define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
103 #define ACC_MPU_SHIFT 25
104 #define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
106 /* XXX Offset for 4375 work around register */
107 #define REG_WORK_AROUND (0x1e4/sizeof(uint32))
109 /* XXX defines for 43602a0 workaround JIRA CRWLARMCR4-53 */
110 #define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
111 #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
112 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
114 /* CTO Prevention Recovery */
/* Poll interval (ms) and max poll count while waiting for CTO to clear */
115 #define CTO_TO_CLEAR_WAIT_MS 50
116 #define CTO_TO_CLEAR_WAIT_MAX_CNT 200
/* Bit positions of the FLR capability / initiate-FLR bits in PCIe config space */
119 #define PCIE_FLR_CAPAB_BIT 28
120 #define PCIE_FUNCTION_LEVEL_RESET_BIT 15
122 #define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
123 #define DHD_SSRESET_STATUS_RETRY_DELAY 40u
125 * Increase SSReset de-assert time to 8ms.
126 * since re-scan on 4378B0 can take longer.
128 #define DHD_SSRESET_STATUS_RETRIES 200u
/* Fetch dongle-memory address of a member of the pciedev_shared structure.
 * The expansion is fully parenthesized so the macro composes safely inside
 * larger address arithmetic (the original expansion was unparenthesized).
 */
#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
	((bus)->shared_addr + OFFSETOF(pciedev_shared_t, member))

/* Fetch dongle-memory address of a member of the rings_info structure */
#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
	((bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member))

/* Fetch dongle-memory address of a member of the ring_mem structure */
#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
	((bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member))
142 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
/* Board revision provided by the platform */
143 extern unsigned int system_rev
;
144 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
/* NOTE(review): EDL-support flag defined elsewhere — semantics in dhd_linux.c */
147 extern int host_edl_support
;
150 /* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
151 uint dma_ring_indices
= 0;
152 /* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
154 /* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
155 * defined in dhd_linux.c
157 bool force_trap_bad_h2d_phase
= 0;
/* Dongle RAM size, discovered at dongle attach time */
159 int dhd_dongle_ramsize
;
/* Global handle to the PCIe bus instance */
160 struct dhd_bus
*g_dhd_bus
= NULL
;
161 #ifdef DNGL_AXI_ERROR_LOGGING
162 static void dhd_log_dump_axi_error(uint8
*axi_err
);
163 #endif /* DNGL_AXI_ERROR_LOGGING */
165 static int dhdpcie_checkdied(dhd_bus_t
*bus
, char *data
, uint size
);
166 static int dhdpcie_bus_readconsole(dhd_bus_t
*bus
);
167 #if defined(DHD_FW_COREDUMP)
168 static int dhdpcie_mem_dump(dhd_bus_t
*bus
);
169 static int dhdpcie_get_mem_dump(dhd_bus_t
*bus
);
170 #endif /* DHD_FW_COREDUMP */
172 static int dhdpcie_bus_membytes(dhd_bus_t
*bus
, bool write
, ulong address
, uint8
*data
, uint size
);
173 static int dhdpcie_bus_doiovar(dhd_bus_t
*bus
, const bcm_iovar_t
*vi
, uint32 actionid
,
174 const char *name
, void *params
,
175 uint plen
, void *arg
, uint len
, int val_size
);
176 static int dhdpcie_bus_lpback_req(struct dhd_bus
*bus
, uint32 intval
);
177 static int dhdpcie_bus_dmaxfer_req(struct dhd_bus
*bus
,
178 uint32 len
, uint32 srcdelay
, uint32 destdelay
,
179 uint32 d11_lpbk
, uint32 core_num
, uint32 wait
);
180 static uint
serialized_backplane_access(dhd_bus_t
* bus
, uint addr
, uint size
, uint
* val
, bool read
);
181 static int dhdpcie_bus_download_state(dhd_bus_t
*bus
, bool enter
);
182 static int _dhdpcie_download_firmware(struct dhd_bus
*bus
);
183 static int dhdpcie_download_firmware(dhd_bus_t
*bus
, osl_t
*osh
);
185 #if defined(FW_SIGNATURE)
186 static int dhdpcie_bus_download_fw_signature(dhd_bus_t
*bus
, bool *do_write
);
187 static int dhdpcie_bus_download_ram_bootloader(dhd_bus_t
*bus
);
188 static int dhdpcie_bus_write_fws_status(dhd_bus_t
*bus
);
189 static int dhdpcie_bus_write_fws_mem_info(dhd_bus_t
*bus
);
190 static int dhdpcie_bus_write_fwsig(dhd_bus_t
*bus
, char *fwsig_path
, char *nvsig_path
);
191 static int dhdpcie_download_rtlv_end(dhd_bus_t
*bus
);
192 static int dhdpcie_bus_save_download_info(dhd_bus_t
*bus
, uint32 download_addr
,
193 uint32 download_size
, const char *signature_fname
,
194 const char *bloader_fname
, uint32 bloader_download_addr
);
195 #endif /* FW_SIGNATURE */
197 static int dhdpcie_bus_write_vars(dhd_bus_t
*bus
);
198 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t
*bus
, uint32 intstatus
);
199 static bool dhdpci_bus_read_frames(dhd_bus_t
*bus
);
200 static int dhdpcie_readshared(dhd_bus_t
*bus
);
201 static void dhdpcie_init_shared_addr(dhd_bus_t
*bus
);
202 static bool dhdpcie_dongle_attach(dhd_bus_t
*bus
);
203 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t
*bus
, int mem_size
);
204 static void dhdpcie_bus_release_dongle(dhd_bus_t
*bus
, osl_t
*osh
,
205 bool dongle_isolation
, bool reset_flag
);
206 static void dhdpcie_bus_release_malloc(dhd_bus_t
*bus
, osl_t
*osh
);
207 static int dhdpcie_downloadvars(dhd_bus_t
*bus
, void *arg
, int len
);
208 static void dhdpcie_setbar1win(dhd_bus_t
*bus
, uint32 addr
);
209 static void dhd_init_bar1_switch_lock(dhd_bus_t
*bus
);
210 static void dhd_deinit_bar1_switch_lock(dhd_bus_t
*bus
);
211 static void dhd_init_pwr_req_lock(dhd_bus_t
*bus
);
212 static void dhd_deinit_pwr_req_lock(dhd_bus_t
*bus
);
213 static void dhd_init_bus_lp_state_lock(dhd_bus_t
*bus
);
214 static void dhd_deinit_bus_lp_state_lock(dhd_bus_t
*bus
);
215 static void dhd_init_backplane_access_lock(dhd_bus_t
*bus
);
216 static void dhd_deinit_backplane_access_lock(dhd_bus_t
*bus
);
217 static uint8
dhdpcie_bus_rtcm8(dhd_bus_t
*bus
, ulong offset
);
218 static void dhdpcie_bus_wtcm8(dhd_bus_t
*bus
, ulong offset
, uint8 data
);
219 static void dhdpcie_bus_wtcm16(dhd_bus_t
*bus
, ulong offset
, uint16 data
);
220 static uint16
dhdpcie_bus_rtcm16(dhd_bus_t
*bus
, ulong offset
);
221 static void dhdpcie_bus_wtcm32(dhd_bus_t
*bus
, ulong offset
, uint32 data
);
222 static uint32
dhdpcie_bus_rtcm32(dhd_bus_t
*bus
, ulong offset
);
223 #ifdef DHD_SUPPORT_64BIT
224 static void dhdpcie_bus_wtcm64(dhd_bus_t
*bus
, ulong offset
, uint64 data
) __attribute__ ((used
));
225 static uint64
dhdpcie_bus_rtcm64(dhd_bus_t
*bus
, ulong offset
) __attribute__ ((used
));
226 #endif /* DHD_SUPPORT_64BIT */
227 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t
*bus
, uint32 data
);
228 static void dhdpcie_bus_reg_unmap(osl_t
*osh
, volatile char *addr
, int size
);
229 static int dhdpcie_cc_nvmshadow(dhd_bus_t
*bus
, struct bcmstrbuf
*b
);
230 static void dhdpcie_fw_trap(dhd_bus_t
*bus
);
231 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t
*bus
, ring_info_t
*ring_info
);
232 static void dhdpcie_handle_mb_data(dhd_bus_t
*bus
);
233 extern void dhd_dpc_enable(dhd_pub_t
*dhdp
);
235 static void dhd_bus_ds_trace(dhd_bus_t
*bus
, uint32 dsval
,
236 bool d2h
, enum dhd_bus_ds_state inbstate
);
238 static void dhd_bus_ds_trace(dhd_bus_t
*bus
, uint32 dsval
, bool d2h
);
239 #endif /* PCIE_INB_DW */
240 extern void dhd_dpc_kill(dhd_pub_t
*dhdp
);
242 #ifdef IDLE_TX_FLOW_MGMT
243 static void dhd_bus_check_idle_scan(dhd_bus_t
*bus
);
244 static void dhd_bus_idle_scan(dhd_bus_t
*bus
);
245 #endif /* IDLE_TX_FLOW_MGMT */
247 #ifdef EXYNOS_PCIE_DEBUG
248 extern void exynos_pcie_register_dump(int ch_num
);
249 #endif /* EXYNOS_PCIE_DEBUG */
251 #if defined(DHD_H2D_LOG_TIME_SYNC)
252 static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t
*bus
);
253 #endif /* DHD_H2D_LOG_TIME_SYNC */
255 #define PCI_VENDOR_ID_BROADCOM 0x14e4
257 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
258 #define MAX_D3_ACK_TIMEOUT 100
259 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
261 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
262 #if defined(PCIE_INB_DW)
263 static uint dhd_doorbell_timeout
= DHD_DEFAULT_DOORBELL_TIMEOUT
;
266 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version
, uint32 h_api_version
);
267 static int dhdpcie_cto_error_recovery(struct dhd_bus
*bus
);
269 static int dhdpcie_init_d11status(struct dhd_bus
*bus
);
271 static int dhdpcie_wrt_rnd(struct dhd_bus
*bus
);
273 #define NUM_PATTERNS 2
274 static bool dhd_bus_tcm_test(struct dhd_bus
*bus
);
276 #if defined(FW_SIGNATURE)
277 static int dhd_bus_dump_fws(dhd_bus_t
*bus
, struct bcmstrbuf
*strbuf
);
279 static void dhdpcie_pme_stat_clear(dhd_bus_t
*bus
);
285 IOV_SET_DOWNLOAD_STATE
,
286 IOV_SET_DOWNLOAD_INFO
,
298 IOV_LTRSLEEPON_UNLOOAD
,
304 IOV_DUMP_RINGUPD_BLOCK
,
309 #ifdef DHD_PCIE_RUNTIMEPM
311 #endif /* DHD_PCIE_RUNTIMEPM */
318 IOV_H2D_ENABLE_TRAP_BADPHASE
,
319 IOV_H2D_TXPOST_MAX_ITEM
,
320 #if defined(DHD_HTPUT_TUNABLES)
321 IOV_H2D_HTPUT_TXPOST_MAX_ITEM
,
322 #endif /* DHD_HTPUT_TUNABLES */
333 IOV_DNGL_CAPS
, /**< returns string with dongle capabilities */
334 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
335 IOV_GDB_SERVER
, /**< starts gdb server on given interface */
336 #endif /* DEBUGGER || DHD_DSCOPE */
337 #if defined(GDB_PROXY)
338 IOV_GDB_PROXY_PROBE
, /**< gdb proxy support presence check */
339 IOV_GDB_PROXY_STOP_COUNT
, /**< gdb proxy firmware stop count */
340 #endif /* GDB_PROXY */
342 #if defined(PCIE_INB_DW)
346 IOV_HSCBSIZE
, /* get HSCB buffer size */
348 IOV_HP2P_PKT_THRESHOLD
,
349 IOV_HP2P_TIME_THRESHOLD
,
351 IOV_HP2P_TXCPL_MAXITEMS
,
352 IOV_HP2P_RXCPL_MAXITEMS
,
353 IOV_EXTDTXS_IN_TXCPL
,
354 IOV_HOSTRDY_AFTER_INIT
,
356 IOV_PCIE_LAST
/**< unused IOVAR */
359 const bcm_iovar_t dhdpcie_iovars
[] = {
360 {"intr", IOV_INTR
, 0, 0, IOVT_BOOL
, 0 },
361 {"memsize", IOV_MEMSIZE
, 0, 0, IOVT_UINT32
, 0 },
362 {"dwnldstate", IOV_SET_DOWNLOAD_STATE
, 0, 0, IOVT_BOOL
, 0 },
363 {"dwnldinfo", IOV_SET_DOWNLOAD_INFO
, 0, 0, IOVT_BUFFER
,
364 sizeof(fw_download_info_t
) },
365 {"vars", IOV_VARS
, 0, 0, IOVT_BUFFER
, 0 },
366 {"devreset", IOV_DEVRESET
, 0, 0, IOVT_UINT8
, 0 },
367 {"pcie_device_trap", IOV_FORCE_FW_TRAP
, 0, 0, 0, 0 },
368 {"pcie_lpbk", IOV_PCIE_LPBK
, 0, 0, IOVT_UINT32
, 0 },
369 {"cc_nvmshadow", IOV_CC_NVMSHADOW
, 0, 0, IOVT_BUFFER
, 0 },
370 {"ramsize", IOV_RAMSIZE
, 0, 0, IOVT_UINT32
, 0 },
371 {"ramstart", IOV_RAMSTART
, 0, 0, IOVT_UINT32
, 0 },
372 {"pcie_dmaxfer", IOV_PCIE_DMAXFER
, 0, 0, IOVT_BUFFER
, sizeof(dma_xfer_info_t
)},
373 {"pcie_suspend", IOV_PCIE_SUSPEND
, DHD_IOVF_PWRREQ_BYPASS
, 0, IOVT_UINT32
, 0 },
374 {"sleep_allowed", IOV_SLEEP_ALLOWED
, 0, 0, IOVT_BOOL
, 0 },
375 {"dngl_isolation", IOV_DONGLEISOLATION
, 0, 0, IOVT_UINT32
, 0 },
376 {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD
, 0, 0, IOVT_UINT32
, 0 },
377 {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK
, 0, 0, IOVT_BUFFER
, 0 },
378 {"dma_ring_indices", IOV_DMA_RINGINDICES
, 0, 0, IOVT_UINT32
, 0},
379 {"metadata_dbg", IOV_METADATA_DBG
, 0, 0, IOVT_BOOL
, 0 },
380 {"rx_metadata_len", IOV_RX_METADATALEN
, 0, 0, IOVT_UINT32
, 0 },
381 {"tx_metadata_len", IOV_TX_METADATALEN
, 0, 0, IOVT_UINT32
, 0 },
382 {"db1_for_mb", IOV_DB1_FOR_MB
, 0, 0, IOVT_UINT32
, 0 },
383 {"txp_thresh", IOV_TXP_THRESHOLD
, 0, 0, IOVT_UINT32
, 0 },
384 {"buzzz_dump", IOV_BUZZZ_DUMP
, 0, 0, IOVT_UINT32
, 0 },
385 {"flow_prio_map", IOV_FLOW_PRIO_MAP
, 0, 0, IOVT_UINT32
, 0 },
386 #ifdef DHD_PCIE_RUNTIMEPM
387 {"idletime", IOV_IDLETIME
, 0, 0, IOVT_INT32
, 0 },
388 #endif /* DHD_PCIE_RUNTIMEPM */
389 {"rxbound", IOV_RXBOUND
, 0, 0, IOVT_UINT32
, 0 },
390 {"txbound", IOV_TXBOUND
, 0, 0, IOVT_UINT32
, 0 },
391 {"fw_hang_report", IOV_HANGREPORT
, 0, 0, IOVT_BOOL
, 0 },
392 {"h2d_mb_data", IOV_H2D_MAILBOXDATA
, 0, 0, IOVT_UINT32
, 0 },
393 {"inforings", IOV_INFORINGS
, 0, 0, IOVT_UINT32
, 0 },
394 {"h2d_phase", IOV_H2D_PHASE
, 0, 0, IOVT_UINT32
, 0 },
395 {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE
, 0, 0,
397 {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM
, 0, 0, IOVT_UINT32
, 0 },
398 #if defined(DHD_HTPUT_TUNABLES)
399 {"h2d_htput_max_txpost", IOV_H2D_HTPUT_TXPOST_MAX_ITEM
, 0, 0, IOVT_UINT32
, 0 },
400 #endif /* DHD_HTPUT_TUNABLES */
401 {"trap_data", IOV_TRAPDATA
, 0, 0, IOVT_BUFFER
, 0 },
402 {"trap_data_raw", IOV_TRAPDATA_RAW
, 0, 0, IOVT_BUFFER
, 0 },
403 {"cto_prevention", IOV_CTO_PREVENTION
, 0, 0, IOVT_UINT32
, 0 },
404 {"pcie_wd_reset", IOV_PCIE_WD_RESET
, 0, 0, IOVT_BOOL
, 0 },
405 {"dump_dongle", IOV_DUMP_DONGLE
, 0, 0, IOVT_BUFFER
,
406 MAX(sizeof(dump_dongle_in_t
), sizeof(dump_dongle_out_t
))},
407 {"clear_ring", IOV_CLEAR_RING
, 0, 0, IOVT_UINT32
, 0 },
408 {"hwa_enable", IOV_HWA_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
409 {"idma_enable", IOV_IDMA_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
410 {"ifrm_enable", IOV_IFRM_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
411 {"dar_enable", IOV_DAR_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
412 {"cap", IOV_DNGL_CAPS
, 0, 0, IOVT_BUFFER
, 0},
413 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
414 {"gdb_server", IOV_GDB_SERVER
, 0, 0, IOVT_UINT32
, 0 },
415 #endif /* DEBUGGER || DHD_DSCOPE */
416 #if defined(GDB_PROXY)
417 {"gdb_proxy_probe", IOV_GDB_PROXY_PROBE
, 0, 0, IOVT_BUFFER
, 2 * sizeof(int32
) },
418 {"gdb_proxy_stop_count", IOV_GDB_PROXY_STOP_COUNT
, 0, 0, IOVT_UINT32
, 0 },
419 #endif /* GDB_PROXY */
420 {"inb_dw_enable", IOV_INB_DW_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
421 #if defined(PCIE_INB_DW)
422 {"deep_sleep", IOV_DEEP_SLEEP
, 0, 0, IOVT_UINT32
, 0},
424 {"cto_threshold", IOV_CTO_THRESHOLD
, 0, 0, IOVT_UINT32
, 0 },
425 {"hscbsize", IOV_HSCBSIZE
, 0, 0, IOVT_UINT32
, 0 },
427 {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL
, 0, 0, IOVT_UINT32
, 0 },
428 {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT
, 0, 0, IOVT_UINT32
, 0 },
429 {"hp2p_mf_enable", IOV_HP2P_MF_ENABLE
, 0, 0, IOVT_UINT32
, 0 },
430 {NULL
, 0, 0, 0, 0, 0 }
/* Max wait for a control-path read response: 2,000,000 (presumably
 * microseconds, i.e. 2 s — confirm against callers). Parenthesized so the
 * macro is safe inside larger expressions such as division or comparison
 * (the original expansion `2 * 1000 * 1000` was unparenthesized).
 */
#define MAX_READ_TIMEOUT	(2 * 1000 * 1000)
/* Max rx frames processed per DPC invocation.
 * NOTE(review): gaps in the original numbering suggest #ifndef guards around
 * these defines were lost in extraction — confirm before editing.
 */
436 #define DHD_RXBOUND 64
/* Max tx status items processed per DPC invocation */
439 #define DHD_TXBOUND 64
/* Per-DPC bounds for info ring and BT log ring completions */
442 #define DHD_INFORING_BOUND 32
443 #define DHD_BTLOGRING_BOUND 32
/* Runtime copies of the DPC bounds, adjustable via the "rxbound"/"txbound"
 * iovars (see dhdpcie_iovars).
 */
445 uint dhd_rxbound
= DHD_RXBOUND
;
446 uint dhd_txbound
= DHD_TXBOUND
;
448 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
449 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
/* Callback table the GDB debugger layer uses to read/write dongle memory
 * through this bus layer.
 * NOTE(review): the closing "};" of this initializer is missing from this
 * fragment.
 */
450 static struct dhd_gdb_bus_ops_s bus_ops
= {
451 .read_u16
= dhdpcie_bus_rtcm16
,
452 .read_u32
= dhdpcie_bus_rtcm32
,
453 .write_u32
= dhdpcie_bus_wtcm32
,
455 #endif /* DEBUGGER || DHD_DSCOPE */
458 dhd_bus_get_flr_force_fail(struct dhd_bus
*bus
)
460 return bus
->flr_force_fail
;
464 * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
465 * link with the bus driver, in order to look for or await the device.
/* Entry point called at module insertion: link with the PCIe bus driver and
 * start looking for / awaiting the device.
 * NOTE(review): return type not visible in this fragment; int assumed since
 * the result of dhdpcie_bus_register() is returned.
 */
int
dhd_bus_register(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	return dhdpcie_bus_register();
}
/* Counterpart of dhd_bus_register(): detach from the PCIe bus driver.
 * NOTE(review): return type not visible in this fragment; void assumed.
 */
void
dhd_bus_unregister(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhdpcie_bus_unregister();
}
484 /** returns a host virtual address */
486 dhdpcie_bus_reg_map(osl_t
*osh
, ulong addr
, int size
)
488 return (uint32
*)REG_MAP(addr
, size
);
/* Unmap a register region previously mapped with dhdpcie_bus_reg_map().
 * NOTE(review): the body of this function is missing from this fragment.
 */
492 dhdpcie_bus_reg_unmap(osl_t
*osh
, volatile char *addr
, int size
)
499 * return H2D Doorbell registers address
500 * use DAR registers instead of enum register for corerev >= 23 (4347B0)
503 dhd_bus_db0_addr_get(struct dhd_bus
*bus
)
505 uint addr
= PCIH2D_MailBox
;
506 uint dar_addr
= DAR_PCIH2D_DB0_0(bus
->sih
->buscorerev
);
508 return ((DAR_ACTIVE(bus
->dhd
)) ? dar_addr
: addr
);
512 dhd_bus_db0_addr_2_get(struct dhd_bus
*bus
)
514 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB2_0(bus
->sih
->buscorerev
) : PCIH2D_MailBox_2
);
518 dhd_bus_db1_addr_get(struct dhd_bus
*bus
)
520 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB0_1(bus
->sih
->buscorerev
) : PCIH2D_DB1
);
524 dhd_bus_db1_addr_3_get(struct dhd_bus
*bus
)
526 return ((DAR_ACTIVE(bus
->dhd
)) ? DAR_PCIH2D_DB3_1(bus
->sih
->buscorerev
) : PCIH2D_DB1_3
);
530 dhd_init_pwr_req_lock(dhd_bus_t
*bus
)
532 if (!bus
->pwr_req_lock
) {
533 bus
->pwr_req_lock
= osl_spin_lock_init(bus
->osh
);
538 dhd_deinit_pwr_req_lock(dhd_bus_t
*bus
)
540 if (bus
->pwr_req_lock
) {
541 osl_spin_lock_deinit(bus
->osh
, bus
->pwr_req_lock
);
542 bus
->pwr_req_lock
= NULL
;
547 * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
/* WAR for SWWLAN-215055: assert or clear the WL/ARM backplane power request
 * via the given DAR register offset.
 * NOTE(review): the branch on 'enable' between the two si_corereg() calls is
 * missing from this fragment — presumably the first call asserts the request
 * (enable) and the second clears it.
 */
550 dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus
*bus
, uint offset
, bool enable
)
553 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, offset
,
554 SRPWR_DMN1_ARMBPSD_MASK
<< SRPWR_REQON_SHIFT
,
555 SRPWR_DMN1_ARMBPSD_MASK
<< SRPWR_REQON_SHIFT
);
557 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, offset
,
558 SRPWR_DMN1_ARMBPSD_MASK
<< SRPWR_REQON_SHIFT
, 0);
/* Ref-counted de-assert of the PCIe power request. Caller must hold the
 * pwr_req lock (see dhd_bus_pcie_pwr_req_clear()).
 * NOTE(review): the declaration of 'mask' and the decrement-and-return body
 * of the first if are missing from this fragment.
 */
563 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus
*bus
)
568 * If multiple de-asserts, decrement ref and return
569 * Clear power request when only one pending
570 * so initial request is not removed unexpectedly
572 if (bus
->pwr_req_ref
> 1) {
577 ASSERT(bus
->pwr_req_ref
== 1);
579 if (MULTIBP_ENAB(bus
->sih
)) {
580 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
581 mask
= SRPWR_DMN1_ARMBPSD_MASK
;
583 mask
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
586 si_srpwr_request(bus
->sih
, mask
, 0);
587 bus
->pwr_req_ref
= 0;
591 dhd_bus_pcie_pwr_req_clear(struct dhd_bus
*bus
)
593 unsigned long flags
= 0;
595 DHD_BUS_PWR_REQ_LOCK(bus
->pwr_req_lock
, flags
);
596 _dhd_bus_pcie_pwr_req_clear_cmn(bus
);
597 DHD_BUS_PWR_REQ_UNLOCK(bus
->pwr_req_lock
, flags
);
/* Lock-free variant of dhd_bus_pcie_pwr_req_clear(); caller must already
 * hold the pwr_req lock or otherwise guarantee exclusion.
 * NOTE(review): return type not visible in this fragment; void assumed.
 */
void
dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
}
/* Ref-counted assert of the PCIe power request. Caller must hold the
 * pwr_req lock (see dhd_bus_pcie_pwr_req()).
 * NOTE(review): the declarations of 'mask'/'val' and the
 * increment-and-return body of the first if are missing from this fragment.
 */
607 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus
*bus
)
611 /* If multiple request entries, increment reference and return */
612 if (bus
->pwr_req_ref
> 0) {
617 ASSERT(bus
->pwr_req_ref
== 0);
619 if (MULTIBP_ENAB(bus
->sih
)) {
620 /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
621 mask
= SRPWR_DMN1_ARMBPSD_MASK
;
622 val
= SRPWR_DMN1_ARMBPSD_MASK
;
624 mask
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
625 val
= SRPWR_DMN0_PCIE_MASK
| SRPWR_DMN1_ARMBPSD_MASK
;
628 si_srpwr_request(bus
->sih
, mask
, val
);
630 bus
->pwr_req_ref
= 1;
634 dhd_bus_pcie_pwr_req(struct dhd_bus
*bus
)
636 unsigned long flags
= 0;
638 DHD_BUS_PWR_REQ_LOCK(bus
->pwr_req_lock
, flags
);
639 _dhd_bus_pcie_pwr_req_cmn(bus
);
640 DHD_BUS_PWR_REQ_UNLOCK(bus
->pwr_req_lock
, flags
);
644 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus
*bus
)
648 mask
= SRPWR_DMN_ALL_MASK(bus
->sih
);
649 val
= SRPWR_DMN_ALL_MASK(bus
->sih
);
651 si_srpwr_request(bus
->sih
, mask
, val
);
655 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus
*bus
)
657 unsigned long flags
= 0;
660 * Few corerevs need the power domain to be active for FLR.
661 * Return if the pwr req is not applicable for the corerev
663 if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus
->sih
->buscorerev
))) {
667 DHD_BUS_PWR_REQ_LOCK(bus
->pwr_req_lock
, flags
);
668 _dhd_bus_pcie_pwr_req_pd0123_cmn(bus
);
669 DHD_BUS_PWR_REQ_UNLOCK(bus
->pwr_req_lock
, flags
);
673 _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus
*bus
)
677 mask
= SRPWR_DMN_ALL_MASK(bus
->sih
);
679 si_srpwr_request(bus
->sih
, mask
, 0);
683 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus
*bus
)
685 unsigned long flags
= 0;
687 /* return if the pwr clear is not applicable for the corerev */
688 if (!(PCIE_PWR_REQ_RELOAD_WAR_ENAB(bus
->sih
->buscorerev
))) {
691 DHD_BUS_PWR_REQ_LOCK(bus
->pwr_req_lock
, flags
);
692 _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus
);
693 DHD_BUS_PWR_REQ_UNLOCK(bus
->pwr_req_lock
, flags
);
/* Lock-free variant of dhd_bus_pcie_pwr_req(); caller must already hold the
 * pwr_req lock or otherwise guarantee exclusion.
 * NOTE(review): return type not visible in this fragment; void assumed.
 */
void
dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
{
	_dhd_bus_pcie_pwr_req_cmn(bus);
}
/* Decide whether MSI may be enabled for this chip: disallowed for
 * buscorerev <= 14 and for an explicit list of chip IDs (INTx is used
 * instead).
 * NOTE(review): the return statements and closing braces are missing from
 * this fragment.
 */
703 dhdpcie_chip_support_msi(dhd_bus_t
*bus
)
705 /* XXX For chips with buscorerev <= 14 intstatus
706 * is not getting cleared from these firmwares.
707 * Either host can read and clear intstatus for these
708 * or not enable MSI at all.
709 * Here option 2 of not enabling MSI is chosen.
710 * Also for hw4 chips, msi is not enabled.
712 DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n",
713 __FUNCTION__
, bus
->sih
->buscorerev
, si_chipid(bus
->sih
)));
714 if (bus
->sih
->buscorerev
<= 14 ||
715 si_chipid(bus
->sih
) == BCM4389_CHIP_ID
||
716 si_chipid(bus
->sih
) == BCM4385_CHIP_ID
||
717 si_chipid(bus
->sih
) == BCM4375_CHIP_ID
||
718 si_chipid(bus
->sih
) == BCM4376_CHIP_ID
||
719 si_chipid(bus
->sih
) == BCM4362_CHIP_ID
||
720 si_chipid(bus
->sih
) == BCM43751_CHIP_ID
||
721 si_chipid(bus
->sih
) == BCM43752_CHIP_ID
||
722 si_chipid(bus
->sih
) == BCM4361_CHIP_ID
||
723 si_chipid(bus
->sih
) == BCM4359_CHIP_ID
) {
731 * Called once for each hardware (dongle) instance that this DHD manages.
733 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
734 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
735 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
737 * 'tcm' is the *host* virtual address at which tcm is mapped.
/* Per-dongle attach: allocate the dhd_bus_t, attach the dongle and the DHD
 * software layer, and initialize bus defaults.
 * NOTE(review): many interior lines (declaration of 'ret', goto-error exits,
 * assignment of *bus_ptr, return statements) are missing from this fragment;
 * the visible tail is the error-cleanup path that frees pcie_sh and bus.
 */
739 int dhdpcie_bus_attach(osl_t
*osh
, dhd_bus_t
**bus_ptr
,
740 volatile char *regs
, volatile char *tcm
, void *pci_dev
)
742 dhd_bus_t
*bus
= NULL
;
745 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
748 if (!(bus
= MALLOCZ(osh
, sizeof(dhd_bus_t
)))) {
749 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__
));
750 ret
= BCME_NORESOURCE
;
757 /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
758 bus
->dev
= (struct pci_dev
*)pci_dev
;
760 dll_init(&bus
->flowring_active_list
);
761 #ifdef IDLE_TX_FLOW_MGMT
762 bus
->active_list_last_process_ts
= OSL_SYSUPTIME();
763 #endif /* IDLE_TX_FLOW_MGMT */
765 /* Attach pcie shared structure */
766 if (!(bus
->pcie_sh
= MALLOCZ(osh
, sizeof(pciedev_shared_t
)))) {
767 DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__
));
768 ret
= BCME_NORESOURCE
;
772 /* dhd_common_init(osh); */
774 if (dhdpcie_dongle_attach(bus
)) {
775 DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__
));
780 /* software resources */
781 if (!(bus
->dhd
= dhd_attach(osh
, bus
, PCMSGBUF_HDRLEN
))) {
782 DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__
));
783 ret
= BCME_NORESOURCE
;
786 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__
));
787 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
788 bus
->dhd
->hostrdy_after_init
= TRUE
;
789 bus
->db1_for_mb
= TRUE
;
790 bus
->dhd
->hang_report
= TRUE
;
791 bus
->use_mailbox
= FALSE
;
792 bus
->use_d0_inform
= FALSE
;
793 bus
->intr_enabled
= FALSE
;
794 bus
->flr_force_fail
= FALSE
;
795 /* update the dma indices if set through module parameter. */
796 if (dma_ring_indices
!= 0) {
797 dhdpcie_set_dma_ring_indices(bus
->dhd
, dma_ring_indices
);
799 /* update h2d phase support if set through module parameter */
800 bus
->dhd
->h2d_phase_supported
= h2d_phase
? TRUE
: FALSE
;
801 /* update force trap on bad phase if set through module parameter */
802 bus
->dhd
->force_dongletrap_on_bad_h2d_phase
=
803 force_trap_bad_h2d_phase
? TRUE
: FALSE
;
804 #ifdef IDLE_TX_FLOW_MGMT
805 bus
->enable_idle_flowring_mgmt
= FALSE
;
806 #endif /* IDLE_TX_FLOW_MGMT */
807 bus
->irq_registered
= FALSE
;
809 #ifdef DHD_MSI_SUPPORT
810 bus
->d2h_intr_method
= enable_msi
&& dhdpcie_chip_support_msi(bus
) ?
811 PCIE_MSI
: PCIE_INTX
;
813 bus
->d2h_intr_method
= PCIE_INTX
;
814 #endif /* DHD_MSI_SUPPORT */
816 DHD_TRACE(("%s: EXIT SUCCESS\n",
/* --- error cleanup path: free partially-constructed bus state --- */
823 DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__
));
824 if (bus
&& bus
->pcie_sh
) {
825 MFREE(osh
, bus
->pcie_sh
, sizeof(pciedev_shared_t
));
829 MFREE(osh
, bus
, sizeof(dhd_bus_t
));
/* Whether the CLM blob download is skipped for this chip.
 * NOTE(review): the case body, default branch and return statements are
 * missing from this fragment.
 */
836 dhd_bus_skip_clm(dhd_pub_t
*dhdp
)
838 switch (dhd_bus_chip_id(dhdp
)) {
839 case BCM4369_CHIP_ID
:
847 dhd_bus_chip(struct dhd_bus
*bus
)
849 ASSERT(bus
->sih
!= NULL
);
850 return bus
->sih
->chip
;
854 dhd_bus_chiprev(struct dhd_bus
*bus
)
857 ASSERT(bus
->sih
!= NULL
);
858 return bus
->sih
->chiprev
;
/* Return the dhd_pub_t associated with this bus.
 * NOTE(review): the body is missing from this fragment — presumably
 * 'return bus->dhd;'.
 */
862 dhd_bus_pub(struct dhd_bus
*bus
)
868 dhd_bus_sih(struct dhd_bus
*bus
)
870 return (void *)bus
->sih
;
/* Return the bus transmit queue.
 * NOTE(review): the body is missing from this fragment — presumably
 * 'return &bus->txq;'.
 */
874 dhd_bus_txq(struct dhd_bus
*bus
)
879 /** Get Chip ID version */
880 uint
dhd_bus_chip_id(dhd_pub_t
*dhdp
)
882 dhd_bus_t
*bus
= dhdp
->bus
;
883 return bus
->sih
->chip
;
886 /** Get Chip Rev ID version */
887 uint
dhd_bus_chiprev_id(dhd_pub_t
*dhdp
)
889 dhd_bus_t
*bus
= dhdp
->bus
;
890 return bus
->sih
->chiprev
;
893 /** Get Chip Pkg ID version */
894 uint
dhd_bus_chippkg_id(dhd_pub_t
*dhdp
)
896 dhd_bus_t
*bus
= dhdp
->bus
;
897 return bus
->sih
->chippkg
;
900 /** Conduct Loopback test */
/* Conduct a PCIe DMA loopback test of 'type' via the "pcie_dmaxfer" iovar
 * and report whether the transfer succeeded.
 * NOTE(review): the declaration of 'ret', the error/exit control flow and
 * the return statement are missing from this fragment.
 */
902 dhd_bus_dmaxfer_lpbk(dhd_pub_t
*dhdp
, uint32 type
)
904 dma_xfer_info_t dmaxfer_lpbk
;
907 #define PCIE_DMAXFER_LPBK_LENGTH 4096
908 memset(&dmaxfer_lpbk
, 0, sizeof(dma_xfer_info_t
));
909 dmaxfer_lpbk
.version
= DHD_DMAXFER_VERSION
;
910 dmaxfer_lpbk
.length
= (uint16
)sizeof(dma_xfer_info_t
);
911 dmaxfer_lpbk
.num_bytes
= PCIE_DMAXFER_LPBK_LENGTH
;
912 dmaxfer_lpbk
.type
= type
;
913 dmaxfer_lpbk
.should_wait
= TRUE
;
915 ret
= dhd_bus_iovar_op(dhdp
, "pcie_dmaxfer", NULL
, 0,
916 (char *)&dmaxfer_lpbk
, sizeof(dma_xfer_info_t
), IOV_SET
);
918 DHD_ERROR(("failed to start PCIe Loopback Test!!! "
919 "Type:%d Reason:%d\n", type
, ret
));
923 if (dmaxfer_lpbk
.status
!= DMA_XFER_SUCCESS
) {
924 DHD_ERROR(("failed to check PCIe Loopback Test!!! "
925 "Type:%d Status:%d Error code:%d\n", type
,
926 dmaxfer_lpbk
.status
, dmaxfer_lpbk
.error_code
));
929 DHD_ERROR(("successful to check PCIe Loopback Test"
930 " Type:%d\n", type
));
932 #undef PCIE_DMAXFER_LPBK_LENGTH
937 /* Check if there is DPC scheduling errors */
/* Check the recorded ISR/DPC timestamps for DPC scheduling errors (DPC not
 * run after an IRQ, or not re-run after a requested reschedule) and dump
 * the timestamps.
 * NOTE(review): the declaration/assignment of the error flag and the return
 * statement are missing from this fragment.
 */
939 dhd_bus_query_dpc_sched_errors(dhd_pub_t
*dhdp
)
941 dhd_bus_t
*bus
= dhdp
->bus
;
944 if (bus
->dpc_entry_time
< bus
->isr_exit_time
) {
945 /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
947 } else if (bus
->dpc_entry_time
< bus
->resched_dpc_time
) {
948 /* Kernel doesn't schedule the DPC after DHD tries to reschedule
949 * the DPC due to pending work items to be processed.
957 /* print out minimum timestamp info */
958 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
959 " isr_exit_time="SEC_USEC_FMT
960 " dpc_entry_time="SEC_USEC_FMT
961 "\ndpc_exit_time="SEC_USEC_FMT
962 " isr_sched_dpc_time="SEC_USEC_FMT
963 " resched_dpc_time="SEC_USEC_FMT
"\n",
964 GET_SEC_USEC(bus
->isr_entry_time
),
965 GET_SEC_USEC(bus
->isr_exit_time
),
966 GET_SEC_USEC(bus
->dpc_entry_time
),
967 GET_SEC_USEC(bus
->dpc_exit_time
),
968 GET_SEC_USEC(bus
->isr_sched_dpc_time
),
969 GET_SEC_USEC(bus
->resched_dpc_time
)));
975 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
/* Read and clear (ack) the PCIe interrupt status. Must be called with
 * interrupts disabled or from the ISR. Detects device removal/link-down
 * when both status and mask read back as 0xffffffff.
 * NOTE(review): the declaration of 'intmask', several closing braces and
 * the return statement(s) are missing from this fragment.
 */
977 dhdpcie_bus_intstatus(dhd_bus_t
*bus
)
979 uint32 intstatus
= 0;
982 if (__DHD_CHK_BUS_LPS_D3_ACKED(bus
)) {
983 DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__
));
986 /* XXX: check for PCIE Gen2 also */
987 if ((bus
->sih
->buscorerev
== 6) || (bus
->sih
->buscorerev
== 4) ||
988 (bus
->sih
->buscorerev
== 2)) {
989 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
990 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, intstatus
);
993 /* this is a PCIE core register..not a config register... */
994 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, 0, 0);
996 /* this is a PCIE core register..not a config register... */
997 intmask
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_mask
, 0, 0);
998 /* Is device removed. intstatus & intmask read 0xffffffff */
999 if (intstatus
== (uint32
)-1 || intmask
== (uint32
)-1) {
1000 DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__
));
1001 DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
1002 __FUNCTION__
, intstatus
, intmask
));
1003 bus
->is_linkdown
= TRUE
;
1004 dhd_pcie_debug_info_dump(bus
->dhd
);
1005 #ifdef CUSTOMER_HW4_DEBUG
1006 #ifdef SUPPORT_LINKDOWN_RECOVERY
1007 #ifdef CONFIG_ARCH_MSM
1008 bus
->no_cfg_restore
= 1;
1009 #endif /* CONFIG_ARCH_MSM */
1010 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1011 bus
->dhd
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN_EP_DETECT
;
1012 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
1013 copy_hang_info_linkdown(bus
->dhd
);
1014 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
1015 dhd_os_send_hang_message(bus
->dhd
);
1016 #endif /* CUSTOMER_HW4_DEBUG */
1020 #ifndef DHD_READ_INTSTATUS_IN_DPC
1021 intstatus
&= intmask
;
1022 #endif /* DHD_READ_INTSTATUS_IN_DPC */
1024 /* XXX: define the mask in a .h file */
1026 * The fourth argument to si_corereg is the "mask" fields of the register to update
1027 * and the fifth field is the "value" to update. Now if we are interested in only
1028 * few fields of the "mask" bit map, we should not be writing back what we read
1029 * By doing so, we might clear/ack interrupts that are not handled yet.
1031 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, bus
->def_intmask
,
1034 intstatus
&= bus
->def_intmask
;
/* Handle a detected CTO (completion timeout): disable runtime PM, attempt
 * backplane/CTO recovery, dump debug state and the console buffer, collect
 * a memory dump if enabled, mark the link down and send a HANG event.
 * NOTE(review): the declaration of 'ret', the sleep/wait calls and several
 * closing braces are missing from this fragment.
 */
1041 dhdpcie_cto_recovery_handler(dhd_pub_t
*dhd
)
1043 dhd_bus_t
*bus
= dhd
->bus
;
1046 /* Disable PCIe Runtime PM to avoid D3_ACK timeout.
1048 DHD_DISABLE_RUNTIME_PM(dhd
);
1050 /* Sleep for 1 seconds so that any AXI timeout
1051 * if running on ALP clock also will be captured
1055 /* reset backplane and cto,
1056 * then access through pcie is recovered.
1058 ret
= dhdpcie_cto_error_recovery(bus
);
1060 /* Waiting for backplane reset */
1062 /* Dump debug Info */
1063 dhd_prot_debug_info_print(bus
->dhd
);
1064 /* Dump console buffer */
1065 dhd_bus_dump_console_buffer(bus
);
1066 #if defined(DHD_FW_COREDUMP)
1067 /* save core dump or write to a file */
1068 if (!bus
->is_linkdown
&& bus
->dhd
->memdump_enabled
) {
1069 #ifdef DHD_SSSR_DUMP
1070 DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__
));
1071 bus
->dhd
->collect_sssr
= TRUE
;
1072 #endif /* DHD_SSSR_DUMP */
1073 bus
->dhd
->memdump_type
= DUMP_TYPE_CTO_RECOVERY
;
1074 dhdpcie_mem_dump(bus
);
1076 #endif /* DHD_FW_COREDUMP */
1078 #ifdef SUPPORT_LINKDOWN_RECOVERY
1079 #ifdef CONFIG_ARCH_MSM
1080 bus
->no_cfg_restore
= 1;
1081 #endif /* CONFIG_ARCH_MSM */
1082 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1083 bus
->is_linkdown
= TRUE
;
1084 bus
->dhd
->hang_reason
= HANG_REASON_PCIE_CTO_DETECT
;
1085 /* Send HANG event */
1086 dhd_os_send_hang_message(bus
->dhd
);
1090 dhd_bus_dump_imp_cfg_registers(struct dhd_bus
*bus
)
1092 uint32 status_cmd
= dhd_pcie_config_read(bus
, PCIECFGREG_STATUS_CMD
, sizeof(uint32
));
1093 uint32 pmcsr
= dhd_pcie_config_read(bus
, PCIE_CFG_PMCSR
, sizeof(uint32
));
1094 uint32 base_addr0
= dhd_pcie_config_read(bus
, PCIECFGREG_BASEADDR0
, sizeof(uint32
));
1095 uint32 base_addr1
= dhd_pcie_config_read(bus
, PCIECFGREG_BASEADDR1
, sizeof(uint32
));
1096 uint32 linkctl
= dhd_pcie_config_read(bus
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(uint32
));
1098 dhd_pcie_config_read(bus
, PCIECFGREG_PML1_SUB_CTRL1
, sizeof(uint32
));
1099 uint32 devctl
= dhd_pcie_config_read(bus
, PCIECFGREG_DEV_STATUS_CTRL
, sizeof(uint32
));
1100 uint32 devctl2
= dhd_pcie_config_read(bus
, PCIECFGGEN_DEV_STATUS_CTRL2
, sizeof(uint32
));
1102 DHD_ERROR(("status_cmd(0x%x)=0x%x, pmcsr(0x%x)=0x%x "
1103 "base_addr0(0x%x)=0x%x base_addr1(0x%x)=0x%x "
1104 "linkctl(0x%x)=0x%x l1ssctrl(0x%x)=0x%x "
1105 "devctl(0x%x)=0x%x devctl2(0x%x)=0x%x \n",
1106 PCIECFGREG_STATUS_CMD
, status_cmd
,
1107 PCIE_CFG_PMCSR
, pmcsr
,
1108 PCIECFGREG_BASEADDR0
, base_addr0
,
1109 PCIECFGREG_BASEADDR1
, base_addr1
,
1110 PCIECFGREG_LINK_STATUS_CTRL
, linkctl
,
1111 PCIECFGREG_PML1_SUB_CTRL1
, l1ssctrl
,
1112 PCIECFGREG_DEV_STATUS_CTRL
, devctl
,
1113 PCIECFGGEN_DEV_STATUS_CTRL2
, devctl2
));
1117 * Name: dhdpcie_bus_isr
1119 * 1: IN int irq -- interrupt vector
1120 * 2: IN void *arg -- handle to private data structure
1122 * Status (TRUE or FALSE)
1125 * Interrupt Service routine checks for the status register,
1126 * disable interrupt and queue DPC if mail box interrupts are raised.
1129 dhdpcie_bus_isr(dhd_bus_t
*bus
)
1131 uint32 intstatus
= 0;
1134 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
1135 /* verify argument */
1137 DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__
));
1141 if (bus
->dhd
->dongle_reset
) {
1142 DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__
));
1146 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
1147 DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__
));
1151 /* avoid processing of interrupts until msgbuf prot is inited */
1152 if (!bus
->intr_enabled
) {
1153 DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__
));
1157 if (PCIECTO_ENAB(bus
)) {
1158 /* read pci_intstatus */
1159 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCI_INT_STATUS
, 4);
1161 if (intstatus
== (uint32
)-1) {
1162 DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
1164 dhdpcie_disable_irq_nosync(bus
);
1168 if (intstatus
& PCI_CTO_INT_MASK
) {
1169 DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
1170 "intstat=0x%x enab=%d\n", __FUNCTION__
,
1171 intstatus
, bus
->cto_enable
));
1172 bus
->cto_triggered
= 1;
1173 dhd_bus_dump_imp_cfg_registers(bus
);
1175 * DAR still accessible
1177 dhd_bus_dump_dar_registers(bus
);
1179 /* Disable further PCIe interrupts */
1180 dhdpcie_disable_irq_nosync(bus
); /* Disable interrupt!! */
1182 dhd_bus_stop_queue(bus
);
1184 /* Schedule CTO recovery */
1185 dhd_schedule_cto_recovery(bus
->dhd
);
1191 if (bus
->d2h_intr_method
== PCIE_MSI
) {
1192 /* For MSI, as intstatus is cleared by firmware, no need to read */
1193 goto skip_intstatus_read
;
1196 #ifndef DHD_READ_INTSTATUS_IN_DPC
1197 intstatus
= dhdpcie_bus_intstatus(bus
);
1199 /* Check if the interrupt is ours or not */
1200 if (intstatus
== 0) {
1201 bus
->non_ours_irq_count
++;
1202 bus
->last_non_ours_irq_time
= OSL_LOCALTIME_NS();
1203 DHD_INFO(("%s : this interrupt is not ours\n", __FUNCTION__
));
1207 /* save the intstatus */
1208 /* read interrupt status register!! Status bits will be cleared in DPC !! */
1209 bus
->intstatus
= intstatus
;
1211 /* return error for 0xFFFFFFFF */
1212 if (intstatus
== (uint32
)-1) {
1213 DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
1214 __FUNCTION__
, intstatus
));
1215 dhdpcie_disable_irq_nosync(bus
);
1219 skip_intstatus_read
:
1220 /* Overall operation:
1221 * - Mask further interrupts
1222 * - Read/ack intstatus
1223 * - Take action based on bits and state
1224 * - Reenable interrupts (as per state)
1227 /* Count the interrupt call */
1229 #endif /* DHD_READ_INTSTATUS_IN_DPC */
1233 bus
->isr_intr_disable_count
++;
1235 /* For Linux, Macos etc (otherthan NDIS) instead of disabling
1236 * dongle interrupt by clearing the IntMask, disable directly
1237 * interrupt from the host side, so that host will not recieve
1238 * any interrupts at all, even though dongle raises interrupts
1240 dhdpcie_disable_irq_nosync(bus
); /* Disable interrupt!! */
1244 #if defined(PCIE_ISR_THREAD)
1246 DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__
));
1247 DHD_OS_WAKE_LOCK(bus
->dhd
);
1248 while (dhd_bus_dpc(bus
));
1249 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
1251 bus
->dpc_sched
= TRUE
;
1252 bus
->isr_sched_dpc_time
= OSL_LOCALTIME_NS();
1253 dhd_sched_dpc(bus
->dhd
); /* queue DPC now!! */
1254 #endif /* defined(SDIO_ISR_THREAD) */
1256 DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__
));
1261 DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__
));
1266 dhdpcie_set_pwr_state(dhd_bus_t
*bus
, uint state
)
1268 uint32 cur_state
= 0;
1270 osl_t
*osh
= bus
->osh
;
1272 pm_csr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
1273 cur_state
= pm_csr
& PCIECFGREG_PM_CSR_STATE_MASK
;
1275 if (cur_state
== state
) {
1276 DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__
, cur_state
));
1280 if (state
> PCIECFGREG_PM_CSR_STATE_D3_HOT
)
1283 /* Validate the state transition
1284 * if already in a lower power state, return error
1286 if (state
!= PCIECFGREG_PM_CSR_STATE_D0
&&
1287 cur_state
<= PCIECFGREG_PM_CSR_STATE_D3_COLD
&&
1288 cur_state
> state
) {
1289 DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__
));
1293 pm_csr
&= ~PCIECFGREG_PM_CSR_STATE_MASK
;
1296 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
), pm_csr
);
1298 /* need to wait for the specified mandatory pcie power transition delay time */
1299 if (state
== PCIECFGREG_PM_CSR_STATE_D3_HOT
||
1300 cur_state
== PCIECFGREG_PM_CSR_STATE_D3_HOT
)
1301 OSL_DELAY(DHDPCIE_PM_D3_DELAY
);
1302 else if (state
== PCIECFGREG_PM_CSR_STATE_D2
||
1303 cur_state
== PCIECFGREG_PM_CSR_STATE_D2
)
1304 OSL_DELAY(DHDPCIE_PM_D2_DELAY
);
1306 /* read back the power state and verify */
1307 pm_csr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
1308 cur_state
= pm_csr
& PCIECFGREG_PM_CSR_STATE_MASK
;
1309 if (cur_state
!= state
) {
1310 DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1311 __FUNCTION__
, cur_state
));
1314 DHD_ERROR(("%s: power transition to %u success \n",
1315 __FUNCTION__
, cur_state
));
1322 dhdpcie_config_check(dhd_bus_t
*bus
)
1325 int ret
= BCME_ERROR
;
1327 for (i
= 0; i
< DHDPCIE_CONFIG_CHECK_RETRY_COUNT
; i
++) {
1328 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCI_CFG_VID
, sizeof(uint32
));
1329 if ((val
& 0xFFFF) == VENDOR_BROADCOM
) {
1333 OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS
* 1000);
1340 dhdpcie_config_restore(dhd_bus_t
*bus
, bool restore_pmcsr
)
1343 osl_t
*osh
= bus
->osh
;
1345 if (BCME_OK
!= dhdpcie_config_check(bus
)) {
1349 for (i
= PCI_CFG_REV
>> 2; i
< DHDPCIE_CONFIG_HDR_SIZE
; i
++) {
1350 OSL_PCI_WRITE_CONFIG(osh
, i
<< 2, sizeof(uint32
), bus
->saved_config
.header
[i
]);
1352 OSL_PCI_WRITE_CONFIG(osh
, PCI_CFG_CMD
, sizeof(uint32
), bus
->saved_config
.header
[1]);
1355 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PM_CSR
,
1356 sizeof(uint32
), bus
->saved_config
.pmcsr
);
1358 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_CAP
, sizeof(uint32
), bus
->saved_config
.msi_cap
);
1359 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_ADDR_L
, sizeof(uint32
),
1360 bus
->saved_config
.msi_addr0
);
1361 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_ADDR_H
,
1362 sizeof(uint32
), bus
->saved_config
.msi_addr1
);
1363 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_MSI_DATA
,
1364 sizeof(uint32
), bus
->saved_config
.msi_data
);
1366 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_DEV_STATUS_CTRL
,
1367 sizeof(uint32
), bus
->saved_config
.exp_dev_ctrl_stat
);
1368 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGGEN_DEV_STATUS_CTRL2
,
1369 sizeof(uint32
), bus
->saved_config
.exp_dev_ctrl_stat2
);
1370 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_LINK_STATUS_CTRL
,
1371 sizeof(uint32
), bus
->saved_config
.exp_link_ctrl_stat
);
1372 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_LINK_STATUS_CTRL2
,
1373 sizeof(uint32
), bus
->saved_config
.exp_link_ctrl_stat2
);
1375 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL1
,
1376 sizeof(uint32
), bus
->saved_config
.l1pm0
);
1377 OSL_PCI_WRITE_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL2
,
1378 sizeof(uint32
), bus
->saved_config
.l1pm1
);
1380 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR0_WIN
, sizeof(uint32
),
1381 bus
->saved_config
.bar0_win
);
1382 dhdpcie_setbar1win(bus
, bus
->saved_config
.bar1_win
);
1388 dhdpcie_config_save(dhd_bus_t
*bus
)
1391 osl_t
*osh
= bus
->osh
;
1393 if (BCME_OK
!= dhdpcie_config_check(bus
)) {
1397 for (i
= 0; i
< DHDPCIE_CONFIG_HDR_SIZE
; i
++) {
1398 bus
->saved_config
.header
[i
] = OSL_PCI_READ_CONFIG(osh
, i
<< 2, sizeof(uint32
));
1401 bus
->saved_config
.pmcsr
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PM_CSR
, sizeof(uint32
));
1403 bus
->saved_config
.msi_cap
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_CAP
,
1405 bus
->saved_config
.msi_addr0
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_ADDR_L
,
1407 bus
->saved_config
.msi_addr1
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_ADDR_H
,
1409 bus
->saved_config
.msi_data
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_MSI_DATA
,
1412 bus
->saved_config
.exp_dev_ctrl_stat
= OSL_PCI_READ_CONFIG(osh
,
1413 PCIECFGREG_DEV_STATUS_CTRL
, sizeof(uint32
));
1414 bus
->saved_config
.exp_dev_ctrl_stat2
= OSL_PCI_READ_CONFIG(osh
,
1415 PCIECFGGEN_DEV_STATUS_CTRL2
, sizeof(uint32
));
1416 bus
->saved_config
.exp_link_ctrl_stat
= OSL_PCI_READ_CONFIG(osh
,
1417 PCIECFGREG_LINK_STATUS_CTRL
, sizeof(uint32
));
1418 bus
->saved_config
.exp_link_ctrl_stat2
= OSL_PCI_READ_CONFIG(osh
,
1419 PCIECFGREG_LINK_STATUS_CTRL2
, sizeof(uint32
));
1421 bus
->saved_config
.l1pm0
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL1
,
1423 bus
->saved_config
.l1pm1
= OSL_PCI_READ_CONFIG(osh
, PCIECFGREG_PML1_SUB_CTRL2
,
1426 bus
->saved_config
.bar0_win
= OSL_PCI_READ_CONFIG(osh
, PCI_BAR0_WIN
,
1428 bus
->saved_config
.bar1_win
= OSL_PCI_READ_CONFIG(osh
, PCI_BAR1_WIN
,
#ifdef CONFIG_ARCH_EXYNOS
/* Global handle used by the Exynos PCIe link-recovery path; assigned the
 * bus's dhd_pub during dongle attach (see dhdpcie_dongle_attach).
 */
dhd_pub_t *link_recovery = NULL;
#endif /* CONFIG_ARCH_EXYNOS */
1439 dhdpcie_bus_intr_init(dhd_bus_t
*bus
)
1441 uint buscorerev
= bus
->sih
->buscorerev
;
1442 bus
->pcie_mailbox_int
= PCIMailBoxInt(buscorerev
);
1443 bus
->pcie_mailbox_mask
= PCIMailBoxMask(buscorerev
);
1444 bus
->d2h_mb_mask
= PCIE_MB_D2H_MB_MASK(buscorerev
);
1445 bus
->def_intmask
= PCIE_MB_D2H_MB_MASK(buscorerev
);
1446 if (buscorerev
< 64) {
1447 bus
->def_intmask
|= PCIE_MB_TOPCIE_FN0_0
| PCIE_MB_TOPCIE_FN0_1
;
1452 dhdpcie_cc_watchdog_reset(dhd_bus_t
*bus
)
1454 uint32 wd_en
= (bus
->sih
->buscorerev
>= 66) ? WD_SSRESET_PCIE_F0_EN
:
1455 (WD_SSRESET_PCIE_F0_EN
| WD_SSRESET_PCIE_ALL_FN_EN
);
1456 pcie_watchdog_reset(bus
->osh
, bus
->sih
, WD_ENABLE_MASK
, wd_en
);
1459 dhdpcie_dongle_reset(dhd_bus_t
*bus
)
1462 /* if the pcie link is down, watchdog reset
1463 * should not be done, as it may hang
1465 if (bus
->is_linkdown
) {
1469 /* Currently BP reset using CFG reg is done only for android platforms */
1470 #ifdef DHD_USE_BP_RESET_SPROM
1471 /* This is for architectures that does NOT control subsystem reset */
1472 (void)dhd_bus_cfg_sprom_ctrl_bp_reset(bus
);
1474 #elif defined(DHD_USE_BP_RESET_SS_CTRL)
1475 /* This is for architectures that supports Subsystem Control */
1476 (void)dhd_bus_cfg_ss_ctrl_bp_reset(bus
);
1480 /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
1481 if (dhd_bus_perform_flr(bus
, FALSE
) == BCME_UNSUPPORTED
)
1483 /* Legacy chipcommon watchdog reset */
1484 dhdpcie_cc_watchdog_reset(bus
);
1487 #endif /* DHD_USE_BP_RESET */
1491 is_bmpu_supported(dhd_bus_t
*bus
)
1493 if (BCM4378_CHIP(bus
->sih
->chip
) ||
1494 BCM4376_CHIP(bus
->sih
->chip
) ||
1495 BCM4387_CHIP(bus
->sih
->chip
) ||
1496 BCM4385_CHIP(bus
->sih
->chip
)) {
1502 #define CHIP_COMMON_SCR_DHD_TO_BL_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_DHD_TO_BL)
1503 #define CHIP_COMMON_SCR_BL_TO_DHD_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SCR_BL_TO_DHD)
1505 dhdpcie_bus_mpu_disable(dhd_bus_t
*bus
)
1507 volatile uint32
*cr4_regs
;
1510 if (is_bmpu_supported(bus
) == FALSE
) {
1514 /* reset to default values dhd_to_bl and bl_to_dhd regs */
1515 (void)serialized_backplane_access(bus
, CHIP_COMMON_SCR_DHD_TO_BL_ADDR(bus
->sih
),
1516 sizeof(val
), &val
, FALSE
);
1517 (void)serialized_backplane_access(bus
, CHIP_COMMON_SCR_BL_TO_DHD_ADDR(bus
->sih
),
1518 sizeof(val
), &val
, FALSE
);
1520 cr4_regs
= si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0);
1521 if (cr4_regs
== NULL
) {
1522 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__
));
1525 if (R_REG(bus
->osh
, cr4_regs
+ ARMCR4REG_CORECAP
) & ACC_MPU_MASK
) {
1526 /* bus mpu is supported */
1527 W_REG(bus
->osh
, cr4_regs
+ ARMCR4REG_MPUCTRL
, 0);
1532 dhdpcie_dongle_attach(dhd_bus_t
*bus
)
1534 osl_t
*osh
= bus
->osh
;
1535 volatile void *regsva
= (volatile void*)bus
->regs
;
1538 sbpcieregs_t
*sbpcieregs
;
1539 bool dongle_reset_needed
;
1542 BCM_REFERENCE(chipid
);
1544 DHD_TRACE(("%s: ENTER\n", __FUNCTION__
));
1546 /* Configure CTO Prevention functionality */
1547 #if defined(BCMPCIE_CTO_PREVENTION)
1548 chipid
= dhd_get_chipid(bus
);
1550 if (BCM4349_CHIP(chipid
) || BCM4350_CHIP(chipid
) || BCM4345_CHIP(chipid
)) {
1551 DHD_ERROR(("Disable CTO\n"));
1552 bus
->cto_enable
= FALSE
;
1554 DHD_ERROR(("Enable CTO\n"));
1555 bus
->cto_enable
= TRUE
;
1558 DHD_ERROR(("Disable CTO\n"));
1559 bus
->cto_enable
= FALSE
;
1560 #endif /* BCMPCIE_CTO_PREVENTION */
1562 if (PCIECTO_ENAB(bus
)) {
1563 dhdpcie_cto_init(bus
, TRUE
);
1566 #ifdef CONFIG_ARCH_EXYNOS
1567 link_recovery
= bus
->dhd
;
1568 #endif /* CONFIG_ARCH_EXYNOS */
1570 dhd_init_pwr_req_lock(bus
);
1571 dhd_init_bus_lp_state_lock(bus
);
1572 dhd_init_backplane_access_lock(bus
);
1574 bus
->alp_only
= TRUE
;
1577 /* Checking PCIe bus status with reading configuration space */
1578 val
= OSL_PCI_READ_CONFIG(osh
, PCI_CFG_VID
, sizeof(uint32
));
1579 if ((val
& 0xFFFF) != VENDOR_BROADCOM
) {
1580 DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__
));
1583 devid
= (val
>> 16) & 0xFFFF;
1584 bus
->cl_devid
= devid
;
1586 /* Set bar0 window to si_enum_base */
1587 dhdpcie_bus_cfg_set_bar0_win(bus
, si_enum_base(devid
));
1590 * Checking PCI_SPROM_CONTROL register for preventing invalid address access
1591 * due to switch address space from PCI_BUS to SI_BUS.
1593 val
= OSL_PCI_READ_CONFIG(osh
, PCI_SPROM_CONTROL
, sizeof(uint32
));
1594 if (val
== 0xffffffff) {
1595 DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__
));
1599 /* si_attach() will provide an SI handle and scan the backplane */
1600 if (!(bus
->sih
= si_attach((uint
)devid
, osh
, regsva
, PCI_BUS
, bus
,
1601 &bus
->vars
, &bus
->varsz
))) {
1602 DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__
));
1606 if (MULTIBP_ENAB(bus
->sih
) && (bus
->sih
->buscorerev
>= 66)) {
1608 * HW JIRA - CRWLPCIEGEN2-672
1609 * Producer Index Feature which is used by F1 gets reset on F0 FLR
1612 if (PCIE_ENUM_RESET_WAR_ENAB(bus
->sih
->buscorerev
)) {
1613 dhdpcie_ssreset_dis_enum_rst(bus
);
1616 /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
1617 * dhdpcie_bus_release_dongle() --> si_detach()
1618 * dhdpcie_dongle_attach() --> si_attach()
1620 bus
->pwr_req_ref
= 0;
1623 if (MULTIBP_ENAB(bus
->sih
)) {
1624 dhd_bus_pcie_pwr_req_nolock(bus
);
1627 /* Get info on the ARM and SOCRAM cores... */
1628 /* Should really be qualified by device id */
1629 if ((si_setcore(bus
->sih
, ARM7S_CORE_ID
, 0)) ||
1630 (si_setcore(bus
->sih
, ARMCM3_CORE_ID
, 0)) ||
1631 (si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0)) ||
1632 (si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0))) {
1633 bus
->armrev
= si_corerev(bus
->sih
);
1634 bus
->coreid
= si_coreid(bus
->sih
);
1636 DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__
));
1640 /* CA7 requires coherent bits on */
1641 if (bus
->coreid
== ARMCA7_CORE_ID
) {
1642 val
= dhdpcie_bus_cfg_read_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4);
1643 dhdpcie_bus_cfg_write_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4,
1644 (val
| PCIE_BARCOHERENTACCEN_MASK
));
1647 /* EFI requirement - stop driver load if FW is already running
1648 * need to do this here before pcie_watchdog_reset, because
1649 * pcie_watchdog_reset will put the ARM back into halt state
1651 if (!dhdpcie_is_arm_halted(bus
)) {
1652 DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
1657 BCM_REFERENCE(dongle_reset_needed
);
1659 /* For inbuilt drivers pcie clk req will be done by RC,
1660 * so do not do clkreq from dhd
1662 if (dhd_download_fw_on_driverload
)
1664 /* Enable CLKREQ# */
1665 dhdpcie_clkreq(bus
->osh
, 1, 1);
1668 /* Calculate htclkratio only for QT, for FPGA it is fixed at 30 */
1671 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
1672 * without checking dongle_isolation flag, but if it is called via some other path
1673 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
1676 if (bus
->dhd
== NULL
) {
1677 /* dhd_attach not yet happened, do dongle reset */
1678 #ifdef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1679 dongle_reset_needed
= FALSE
;
1681 dongle_reset_needed
= TRUE
;
1682 #endif /* DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1684 /* Based on dongle_isolationflag, reset dongle */
1685 dongle_reset_needed
= !(bus
->dhd
->dongle_isolation
);
1689 * Issue dongle to reset all the cores on the chip - similar to rmmod dhd
1690 * This is required to avoid spurious interrupts to the Host and bring back
1691 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
1693 if (dongle_reset_needed
) {
1694 dhdpcie_dongle_reset(bus
);
1697 /* need to set the force_bt_quiesce flag here
1698 * before calling dhdpcie_dongle_flr_or_pwr_toggle
1700 bus
->force_bt_quiesce
= TRUE
;
1702 * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
1703 * So don't need BT quiesce.
1705 if (bus
->sih
->buscorerev
>= 66) {
1706 bus
->force_bt_quiesce
= FALSE
;
1709 dhdpcie_dongle_flr_or_pwr_toggle(bus
);
1711 dhdpcie_bus_mpu_disable(bus
);
1713 si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
1714 sbpcieregs
= (sbpcieregs_t
*)(bus
->regs
);
1716 /* WAR where the BAR1 window may not be sized properly */
1717 W_REG(osh
, &sbpcieregs
->configaddr
, 0x4e0);
1718 val
= R_REG(osh
, &sbpcieregs
->configdata
);
1719 W_REG(osh
, &sbpcieregs
->configdata
, val
);
1721 /* if chip uses sysmem instead of tcm, typically ARM CA chips */
1722 if (si_setcore(bus
->sih
, SYSMEM_CORE_ID
, 0)) {
1723 if (!(bus
->orig_ramsize
= si_sysmem_size(bus
->sih
))) {
1724 DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__
));
1727 /* also populate base address */
1728 switch ((uint16
)bus
->sih
->chip
) {
1729 case BCM4368_CHIP_ID
:
1730 bus
->dongle_ram_base
= CA7_4368_RAM_BASE
;
1732 case BCM4385_CHIP_ID
:
1733 bus
->dongle_ram_base
= CA7_4385_RAM_BASE
;
1735 case BCM4388_CHIP_ID
:
1736 case BCM4389_CHIP_ID
:
1737 bus
->dongle_ram_base
= CA7_4389_RAM_BASE
;
1740 /* also populate base address */
1741 bus
->dongle_ram_base
= 0x200000;
1742 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1743 __FUNCTION__
, bus
->dongle_ram_base
));
1746 } else if (!si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0)) {
1747 if (!(bus
->orig_ramsize
= si_socram_size(bus
->sih
))) {
1748 DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__
));
1752 /* cr4 has a different way to find the RAM size from TCM's */
1753 if (!(bus
->orig_ramsize
= si_tcm_size(bus
->sih
))) {
1754 DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__
));
1757 /* also populate base address */
1758 switch ((uint16
)bus
->sih
->chip
) {
1759 case BCM4339_CHIP_ID
:
1760 case BCM4335_CHIP_ID
:
1761 bus
->dongle_ram_base
= CR4_4335_RAM_BASE
;
1763 case BCM4358_CHIP_ID
:
1764 case BCM4354_CHIP_ID
:
1765 case BCM43567_CHIP_ID
:
1766 case BCM43569_CHIP_ID
:
1767 case BCM4350_CHIP_ID
:
1768 case BCM43570_CHIP_ID
:
1769 bus
->dongle_ram_base
= CR4_4350_RAM_BASE
;
1771 case BCM4360_CHIP_ID
:
1772 bus
->dongle_ram_base
= CR4_4360_RAM_BASE
;
1775 case BCM4364_CHIP_ID
:
1776 bus
->dongle_ram_base
= CR4_4364_RAM_BASE
;
1780 bus
->dongle_ram_base
= (bus
->sih
->chiprev
< 6) /* changed at 4345C0 */
1781 ? CR4_4345_LT_C0_RAM_BASE
: CR4_4345_GE_C0_RAM_BASE
;
1784 bus
->dongle_ram_base
= CR4_43602_RAM_BASE
;
1786 case BCM4349_CHIP_GRPID
:
1787 /* RAM based changed from 4349c0(revid=9) onwards */
1788 bus
->dongle_ram_base
= ((bus
->sih
->chiprev
< 9) ?
1789 CR4_4349_RAM_BASE
: CR4_4349_RAM_BASE_FROM_REV_9
);
1791 case BCM4347_CHIP_ID
:
1792 case BCM4357_CHIP_ID
:
1793 case BCM4361_CHIP_ID
:
1794 bus
->dongle_ram_base
= CR4_4347_RAM_BASE
;
1796 case BCM43751_CHIP_ID
:
1797 bus
->dongle_ram_base
= CR4_43751_RAM_BASE
;
1799 case BCM43752_CHIP_ID
:
1800 bus
->dongle_ram_base
= CR4_43752_RAM_BASE
;
1802 case BCM4376_CHIP_GRPID
:
1803 bus
->dongle_ram_base
= CR4_4376_RAM_BASE
;
1805 case BCM4378_CHIP_GRPID
:
1806 bus
->dongle_ram_base
= CR4_4378_RAM_BASE
;
1808 case BCM4362_CHIP_ID
:
1809 bus
->dongle_ram_base
= CR4_4362_RAM_BASE
;
1811 case BCM4375_CHIP_ID
:
1812 case BCM4369_CHIP_ID
:
1813 bus
->dongle_ram_base
= CR4_4369_RAM_BASE
;
1815 case BCM4377_CHIP_ID
:
1816 bus
->dongle_ram_base
= CR4_4377_RAM_BASE
;
1818 case BCM4387_CHIP_GRPID
:
1819 bus
->dongle_ram_base
= CR4_4387_RAM_BASE
;
1821 case BCM4385_CHIP_ID
:
1822 bus
->dongle_ram_base
= CR4_4385_RAM_BASE
;
1824 case BCM4389_CHIP_ID
:
1825 /* XXX: For corerev 3, use 4387 rambase */
1826 bus
->dongle_ram_base
= CR4_4387_RAM_BASE
;
1829 bus
->dongle_ram_base
= 0;
1830 DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1831 __FUNCTION__
, bus
->dongle_ram_base
));
1834 bus
->ramsize
= bus
->orig_ramsize
;
1835 if (dhd_dongle_ramsize
) {
1836 dhdpcie_bus_dongle_setmemsize(bus
, dhd_dongle_ramsize
);
1839 if (bus
->ramsize
> DONGLE_TCM_MAP_SIZE
) {
1840 DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1841 __FUNCTION__
, bus
->ramsize
, bus
->ramsize
));
1845 DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1846 bus
->ramsize
, bus
->orig_ramsize
, bus
->dongle_ram_base
));
1848 dhdpcie_bar1_window_switch_enab(bus
);
1850 /* Init bar1_switch_lock only after bar1_switch_enab is inited */
1851 dhd_init_bar1_switch_lock(bus
);
1853 bus
->srmemsize
= si_socram_srmem_size(bus
->sih
);
1855 dhdpcie_bus_intr_init(bus
);
1857 /* Set the poll and/or interrupt flags */
1858 bus
->intr
= (bool)dhd_intr
;
1859 if ((bus
->poll
= (bool)dhd_poll
))
1861 #ifdef DHD_DISABLE_ASPM
1862 dhd_bus_aspm_enable_rc_ep(bus
, FALSE
);
1863 #endif /* DHD_DISABLE_ASPM */
1865 bus
->inb_enabled
= TRUE
;
1866 #endif /* PCIE_INB_DW */
1867 #if defined(PCIE_INB_DW)
1868 bus
->ds_enabled
= TRUE
;
1871 bus
->hwa_enabled
= TRUE
;
1872 bus
->idma_enabled
= TRUE
;
1873 bus
->ifrm_enabled
= TRUE
;
1875 dhdpcie_pme_stat_clear(bus
);
1877 if (MULTIBP_ENAB(bus
->sih
)) {
1878 dhd_bus_pcie_pwr_req_clear_nolock(bus
);
1881 * One time clearing of Common Power Domain since HW default is set
1882 * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
1883 * for 4378B0 (rev 68).
1884 * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
1886 si_srpwr_request(bus
->sih
, SRPWR_DMN0_PCIE_MASK
, 0);
1889 * WAR to fix ARM cold boot;
1890 * Assert WL domain in DAR helps but not enum
1892 if (bus
->sih
->buscorerev
>= 68) {
1893 dhd_bus_pcie_pwr_req_wl_domain(bus
,
1894 DAR_PCIE_PWR_CTRL((bus
->sih
)->buscorerev
), TRUE
);
1898 DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__
));
1903 /* for EFI even if there is an error, load still succeeds
1904 * so si_detach should not be called here, it is called during unload
1907 dhd_deinit_pwr_req_lock(bus
);
1908 dhd_deinit_bus_lp_state_lock(bus
);
1909 dhd_deinit_backplane_access_lock(bus
);
1911 if (bus
->sih
!= NULL
) {
1912 if (MULTIBP_ENAB(bus
->sih
)) {
1913 dhd_bus_pcie_pwr_req_clear_nolock(bus
);
1916 si_detach(bus
->sih
);
1919 DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__
));
1924 dhpcie_bus_unmask_interrupt(dhd_bus_t
*bus
)
1926 dhdpcie_bus_cfg_write_dword(bus
, PCIIntmask
, 4, I_MB
);
1930 dhpcie_bus_mask_interrupt(dhd_bus_t
*bus
)
1932 dhdpcie_bus_cfg_write_dword(bus
, PCIIntmask
, 4, 0x0);
1936 /* Non atomic function, caller should hold appropriate lock */
1938 dhdpcie_bus_intr_enable(dhd_bus_t
*bus
)
1940 DHD_TRACE(("%s Enter\n", __FUNCTION__
));
1942 if (bus
->sih
&& !bus
->is_linkdown
) {
1943 /* Skip after recieving D3 ACK */
1944 if (bus
->bus_low_power_state
== DHD_BUS_D3_ACK_RECIEVED
) {
1947 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
1948 (bus
->sih
->buscorerev
== 4)) {
1949 dhpcie_bus_unmask_interrupt(bus
);
1951 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_mask
,
1952 bus
->def_intmask
, bus
->def_intmask
);
1958 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1961 /* Non atomic function, caller should hold appropriate lock */
1963 dhdpcie_bus_intr_disable(dhd_bus_t
*bus
)
1965 DHD_TRACE(("%s Enter\n", __FUNCTION__
));
1966 if (bus
&& bus
->sih
&& !bus
->is_linkdown
) {
1967 /* Skip after recieving D3 ACK */
1968 if (DHD_CHK_BUS_LPS_D3_ACKED(bus
)) {
1972 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
1973 (bus
->sih
->buscorerev
== 4)) {
1974 dhpcie_bus_mask_interrupt(bus
);
1976 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_mask
,
1977 bus
->def_intmask
, 0);
1981 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
1985 * dhdpcie_advertise_bus_cleanup advertises that clean up is under progress
1986 * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts
1987 * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for
1988 * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
1989 * they will exit from there itself without marking dhd_bus_busy_state as BUSY.
1992 dhdpcie_advertise_bus_cleanup(dhd_pub_t
*dhdp
)
1994 unsigned long flags
;
1997 #ifdef DHD_PCIE_RUNTIMEPM
1998 dhdpcie_runtime_bus_wake(dhdp
, TRUE
, dhdpcie_advertise_bus_cleanup
);
1999 #endif /* DHD_PCIE_RUNTIMEPM */
2001 dhdp
->dhd_watchdog_ms_backup
= dhd_watchdog_ms
;
2002 if (dhdp
->dhd_watchdog_ms_backup
) {
2003 DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
2005 dhd_os_wd_timer(dhdp
, 0);
2007 if (dhdp
->busstate
!= DHD_BUS_DOWN
) {
2008 #ifdef DHD_DONGLE_TRAP_IN_DETACH
2010 * For x86 platforms, rmmod/insmod is failing due to some power
2011 * resources are not held high.
2012 * Hence induce DB7 trap during detach and in FW trap handler all
2013 * power resources are held high.
2015 if (!dhd_query_bus_erros(dhdp
) && dhdp
->db7_trap
.fw_db7w_trap
) {
2016 dhdp
->db7_trap
.fw_db7w_trap_inprogress
= TRUE
;
2017 dhdpcie_fw_trap(dhdp
->bus
);
2018 OSL_DELAY(100 * 1000); // wait 100 msec
2019 dhdp
->db7_trap
.fw_db7w_trap_inprogress
= FALSE
;
2021 DHD_ERROR(("%s: DB7 Not sent!!!\n",
2024 #endif /* DHD_DONGLE_TRAP_IN_DETACH */
2025 DHD_GENERAL_LOCK(dhdp
, flags
);
2026 dhdp
->busstate
= DHD_BUS_DOWN_IN_PROGRESS
;
2027 DHD_GENERAL_UNLOCK(dhdp
, flags
);
2030 timeleft
= dhd_os_busbusy_wait_negation(dhdp
, &dhdp
->dhd_bus_busy_state
);
2031 if ((timeleft
== 0) || (timeleft
== 1)) {
2032 /* XXX This condition ideally should not occur, this means some
2033 * bus usage context is not clearing the respective usage bit, print
2034 * dhd_bus_busy_state and crash the host for further debugging.
2036 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
2037 __FUNCTION__
, dhdp
->dhd_bus_busy_state
));
2045 dhdpcie_advertise_bus_remove(dhd_pub_t
*dhdp
)
2047 unsigned long flags
;
2050 DHD_GENERAL_LOCK(dhdp
, flags
);
2051 dhdp
->busstate
= DHD_BUS_REMOVE
;
2052 DHD_GENERAL_UNLOCK(dhdp
, flags
);
2054 timeleft
= dhd_os_busbusy_wait_negation(dhdp
, &dhdp
->dhd_bus_busy_state
);
2055 if ((timeleft
== 0) || (timeleft
== 1)) {
2056 DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
2057 __FUNCTION__
, dhdp
->dhd_bus_busy_state
));
2065 dhdpcie_bus_remove_prep(dhd_bus_t
*bus
)
2067 unsigned long flags
;
2068 DHD_TRACE(("%s Enter\n", __FUNCTION__
));
2070 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
2071 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__
));
2072 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
2073 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
2076 /* De-Initialize the lock to serialize Device Wake Inband activities */
2077 if (bus
->inb_lock
) {
2078 osl_spin_lock_deinit(bus
->dhd
->osh
, bus
->inb_lock
);
2079 bus
->inb_lock
= NULL
;
2083 dhd_os_sdlock(bus
->dhd
);
2085 if (bus
->sih
&& !bus
->dhd
->dongle_isolation
) {
2087 dhd_bus_pcie_pwr_req_reload_war(bus
);
2089 /* Skip below WARs for Android as insmod fails after rmmod in Brix Android */
2091 /* if the pcie link is down, watchdog reset
2092 * should not be done, as it may hang
2095 if (!bus
->is_linkdown
) {
2096 /* For Non-EFI modular builds, do dongle reset during rmmod */
2097 /* For EFI-DHD this compile flag will be defined.
2098 * In EFI, depending on bt over pcie mode
2099 * we either power toggle or do F0 FLR
2100 * from dhdpcie_bus_release dongle. So no need to
2101 * do dongle reset from here
2103 dhdpcie_dongle_reset(bus
);
2106 bus
->dhd
->is_pcie_watchdog_reset
= TRUE
;
2109 dhd_os_sdunlock(bus
->dhd
);
2111 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
2115 dhd_init_bus_lp_state_lock(dhd_bus_t
*bus
)
2117 if (!bus
->bus_lp_state_lock
) {
2118 bus
->bus_lp_state_lock
= osl_spin_lock_init(bus
->osh
);
2123 dhd_deinit_bus_lp_state_lock(dhd_bus_t
*bus
)
2125 if (bus
->bus_lp_state_lock
) {
2126 osl_spin_lock_deinit(bus
->osh
, bus
->bus_lp_state_lock
);
2127 bus
->bus_lp_state_lock
= NULL
;
2132 dhd_init_backplane_access_lock(dhd_bus_t
*bus
)
2134 if (!bus
->backplane_access_lock
) {
2135 bus
->backplane_access_lock
= osl_spin_lock_init(bus
->osh
);
2140 dhd_deinit_backplane_access_lock(dhd_bus_t
*bus
)
2142 if (bus
->backplane_access_lock
) {
2143 osl_spin_lock_deinit(bus
->osh
, bus
->backplane_access_lock
);
2144 bus
->backplane_access_lock
= NULL
;
2148 /** Detach and free everything */
2150 dhdpcie_bus_release(dhd_bus_t
*bus
)
2152 bool dongle_isolation
= FALSE
;
2155 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2163 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
2165 #endif /* DEBUGGER || DHD_DSCOPE */
2166 dhdpcie_advertise_bus_remove(bus
->dhd
);
2167 dongle_isolation
= bus
->dhd
->dongle_isolation
;
2168 bus
->dhd
->is_pcie_watchdog_reset
= FALSE
;
2169 dhdpcie_bus_remove_prep(bus
);
2172 dhdpcie_bus_intr_disable(bus
);
2173 dhdpcie_free_irq(bus
);
2175 dhd_deinit_bus_lp_state_lock(bus
);
2176 dhd_deinit_bar1_switch_lock(bus
);
2177 dhd_deinit_backplane_access_lock(bus
);
2178 dhd_deinit_pwr_req_lock(bus
);
2180 * dhdpcie_bus_release_dongle free bus->sih handle, which is needed to
2181 * access Dongle registers.
2182 * dhd_detach will communicate with dongle to delete flowring ..etc.
2183 * So dhdpcie_bus_release_dongle should be called only after the dhd_detach.
2185 dhd_detach(bus
->dhd
);
2186 dhdpcie_bus_release_dongle(bus
, osh
, dongle_isolation
, TRUE
);
2190 /* unmap the regs and tcm here!! */
2192 dhdpcie_bus_reg_unmap(osh
, bus
->regs
, DONGLE_REG_MAP_SIZE
);
2196 dhdpcie_bus_reg_unmap(osh
, bus
->tcm
, DONGLE_TCM_MAP_SIZE
);
2200 dhdpcie_bus_release_malloc(bus
, osh
);
2201 /* Detach pcie shared structure */
2203 MFREE(osh
, bus
->pcie_sh
, sizeof(pciedev_shared_t
));
2206 if (bus
->console
.buf
!= NULL
) {
2207 MFREE(osh
, bus
->console
.buf
, bus
->console
.bufsize
);
2210 /* Finally free bus info */
2211 MFREE(osh
, bus
, sizeof(dhd_bus_t
));
2216 DHD_TRACE(("%s: Exit\n", __FUNCTION__
));
2217 } /* dhdpcie_bus_release */
2220 dhdpcie_bus_release_dongle(dhd_bus_t
*bus
, osl_t
*osh
, bool dongle_isolation
, bool reset_flag
)
2222 DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__
,
2223 bus
->dhd
, bus
->dhd
->dongle_reset
));
2225 if ((bus
->dhd
&& bus
->dhd
->dongle_reset
) && reset_flag
) {
2226 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
2230 if (bus
->is_linkdown
) {
2231 DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__
));
2238 * Perform dongle reset only if dongle isolation is not enabled.
2239 * In android platforms, dongle isolation will be enabled and
2240 * quiescing dongle will be done using DB7 trap.
2242 if (!dongle_isolation
&&
2243 bus
->dhd
&& !bus
->dhd
->is_pcie_watchdog_reset
) {
2244 dhdpcie_dongle_reset(bus
);
2247 /* Only for EFI this will be effective */
2248 dhdpcie_dongle_flr_or_pwr_toggle(bus
);
2250 if (bus
->ltrsleep_on_unload
) {
2251 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
2252 OFFSETOF(sbpcieregs_t
, u
.pcie2
.ltr_state
), ~0, 0);
2255 if (bus
->sih
->buscorerev
== 13)
2256 pcie_serdes_iddqdisable(bus
->osh
, bus
->sih
,
2257 (sbpcieregs_t
*) bus
->regs
);
2259 /* For inbuilt drivers pcie clk req will be done by RC,
2260 * so do not do clkreq from dhd
2262 if (dhd_download_fw_on_driverload
)
2264 /* Disable CLKREQ# */
2265 dhdpcie_clkreq(bus
->osh
, 1, 0);
2268 if (bus
->sih
!= NULL
) {
2269 si_detach(bus
->sih
);
2272 if (bus
->vars
&& bus
->varsz
)
2273 MFREE(osh
, bus
->vars
, bus
->varsz
);
2277 DHD_TRACE(("%s Exit\n", __FUNCTION__
));
2281 dhdpcie_bus_cfg_read_dword(dhd_bus_t
*bus
, uint32 addr
, uint32 size
)
2283 uint32 data
= OSL_PCI_READ_CONFIG(bus
->osh
, addr
, size
);
2287 /** 32 bit config write */
2289 dhdpcie_bus_cfg_write_dword(dhd_bus_t
*bus
, uint32 addr
, uint32 size
, uint32 data
)
2291 OSL_PCI_WRITE_CONFIG(bus
->osh
, addr
, size
, data
);
2295 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t
*bus
, uint32 data
)
2297 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR0_WIN
, 4, data
);
2301 dhdpcie_bus_dongle_setmemsize(struct dhd_bus
*bus
, int mem_size
)
2303 int32 min_size
= DONGLE_MIN_MEMSIZE
;
2304 /* Restrict the memsize to user specified limit */
2305 DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d max accepted %d\n",
2306 mem_size
, min_size
, (int32
)bus
->orig_ramsize
));
2307 if ((mem_size
> min_size
) &&
2308 (mem_size
< (int32
)bus
->orig_ramsize
)) {
2309 bus
->ramsize
= mem_size
;
2311 DHD_ERROR(("%s: Invalid mem_size %d\n", __FUNCTION__
, mem_size
));
2316 dhdpcie_bus_release_malloc(dhd_bus_t
*bus
, osl_t
*osh
)
2318 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2320 if (bus
->dhd
&& bus
->dhd
->dongle_reset
)
2323 if (bus
->vars
&& bus
->varsz
) {
2324 MFREE(osh
, bus
->vars
, bus
->varsz
);
2327 DHD_TRACE(("%s: Exit\n", __FUNCTION__
));
2332 /** Stop bus module: clear pending frames, disable data flow */
2333 void dhd_bus_stop(struct dhd_bus
*bus
, bool enforce_mutex
)
2335 unsigned long flags
;
2337 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
2342 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
2343 DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__
));
2347 DHD_STOP_RPM_TIMER(bus
->dhd
);
2349 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
2350 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__
));
2351 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
2352 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
2354 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2355 atomic_set(&bus
->dhd
->block_bus
, TRUE
);
2356 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2358 dhdpcie_bus_intr_disable(bus
);
2360 if (!bus
->is_linkdown
) {
2362 status
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
2363 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, status
);
2366 if (!dhd_download_fw_on_driverload
) {
2367 dhd_dpc_kill(bus
->dhd
);
2370 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2371 pm_runtime_disable(dhd_bus_to_dev(bus
));
2372 pm_runtime_set_suspended(dhd_bus_to_dev(bus
));
2373 pm_runtime_enable(dhd_bus_to_dev(bus
));
2374 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2376 /* Clear rx control and wake any waiters */
2377 /* XXX More important in disconnect, but no context? */
2378 dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT
);
2379 dhd_wakeup_ioctl_event(bus
->dhd
, IOCTL_RETURN_ON_BUS_STOP
);
2386 * Watchdog timer function.
2387 * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
2389 bool dhd_bus_watchdog(dhd_pub_t
*dhd
)
2391 unsigned long flags
;
2392 dhd_bus_t
*bus
= dhd
->bus
;
2394 if (dhd_query_bus_erros(bus
->dhd
)) {
2398 DHD_GENERAL_LOCK(dhd
, flags
);
2399 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd
) ||
2400 DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd
)) {
2401 DHD_GENERAL_UNLOCK(dhd
, flags
);
2404 DHD_BUS_BUSY_SET_IN_WD(dhd
);
2405 DHD_GENERAL_UNLOCK(dhd
, flags
);
2407 #ifdef DHD_PCIE_RUNTIMEPM
2408 dhdpcie_runtime_bus_wake(dhd
, TRUE
, __builtin_return_address(0));
2409 #endif /* DHD_PCIE_RUNTIMEPM */
2411 /* Poll for console output periodically */
2412 if (dhd
->busstate
== DHD_BUS_DATA
&&
2413 dhd
->dhd_console_ms
!= 0 &&
2414 DHD_CHK_BUS_NOT_IN_LPS(bus
)) {
2415 bus
->console
.count
+= dhd_watchdog_ms
;
2416 if (bus
->console
.count
>= dhd
->dhd_console_ms
) {
2417 bus
->console
.count
-= dhd
->dhd_console_ms
;
2419 if (MULTIBP_ENAB(bus
->sih
)) {
2420 dhd_bus_pcie_pwr_req(bus
);
2423 /* Make sure backplane clock is on */
2424 if (dhd
->db7_trap
.fw_db7w_trap_inprogress
== FALSE
) {
2425 if (dhdpcie_bus_readconsole(bus
) < 0) {
2426 dhd
->dhd_console_ms
= 0; /* On error, stop trying */
2430 if (MULTIBP_ENAB(bus
->sih
)) {
2431 dhd_bus_pcie_pwr_req_clear(bus
);
2436 #ifdef DHD_READ_INTSTATUS_IN_DPC
2439 bus
->dpc_sched
= TRUE
;
2440 dhd_sched_dpc(bus
->dhd
); /* queue DPC now!! */
2442 #endif /* DHD_READ_INTSTATUS_IN_DPC */
2444 DHD_GENERAL_LOCK(dhd
, flags
);
2445 DHD_BUS_BUSY_CLEAR_IN_WD(dhd
);
2446 dhd_os_busbusy_wake(dhd
);
2447 DHD_GENERAL_UNLOCK(dhd
, flags
);
2448 #if !defined(DHD_PCIE_RUNTIMEPM) && defined(PCIE_INB_DW)
2449 dhd
->bus
->inb_dw_deassert_cnt
+= dhd_watchdog_ms
;
2450 if (dhd
->bus
->inb_dw_deassert_cnt
>=
2451 DHD_INB_DW_DEASSERT_MS
) {
2452 dhd
->bus
->inb_dw_deassert_cnt
= 0;
2453 /* Inband device wake is deasserted from DPC context after DS_Exit is received,
2454 * but if at all there is no d2h interrupt received, dpc will not be scheduled
2455 * and inband DW is not deasserted, hence DW is deasserted from watchdog thread
2458 dhd_bus_dw_deassert(dhd
);
2462 } /* dhd_bus_watchdog */
2464 #if defined(SUPPORT_MULTIPLE_REVISION)
2465 static int concate_revision_bcm4358(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2468 #if defined(SUPPORT_MULTIPLE_CHIPS)
2469 char chipver_tag
[20] = "_4358";
2471 char chipver_tag
[10] = {0, };
2472 #endif /* SUPPORT_MULTIPLE_CHIPS */
2474 chiprev
= dhd_bus_chiprev(bus
);
2476 DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2477 strcat(chipver_tag
, "_a0");
2478 } else if (chiprev
== 1) {
2479 DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2480 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2481 strcat(chipver_tag
, "_a1");
2482 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2483 } else if (chiprev
== 3) {
2484 DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2485 #if defined(SUPPORT_MULTIPLE_CHIPS)
2486 strcat(chipver_tag
, "_a3");
2487 #endif /* SUPPORT_MULTIPLE_CHIPS */
2489 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev
));
2492 strcat(fw_path
, chipver_tag
);
2494 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2495 if (chiprev
== 1 || chiprev
== 3) {
2496 int ret
= dhd_check_module_b85a();
2497 if ((chiprev
== 1) && (ret
< 0)) {
2498 memset(chipver_tag
, 0x00, sizeof(chipver_tag
));
2499 strcat(chipver_tag
, "_b85");
2500 strcat(chipver_tag
, "_a1");
2504 DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__
, chipver_tag
));
2505 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2507 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2508 if (system_rev
>= 10) {
2509 DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev
));
2510 strcat(chipver_tag
, "_r10");
2512 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2513 strcat(nv_path
, chipver_tag
);
2518 static int concate_revision_bcm4359(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2521 char chipver_tag
[10] = {0, };
2522 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2523 defined(SUPPORT_BCM4359_MIXED_MODULES)
2524 int module_type
= -1;
2525 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2527 chip_ver
= bus
->sih
->chiprev
;
2528 if (chip_ver
== 4) {
2529 DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2530 strncat(chipver_tag
, "_b0", strlen("_b0"));
2531 } else if (chip_ver
== 5) {
2532 DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2533 strncat(chipver_tag
, "_b1", strlen("_b1"));
2534 } else if (chip_ver
== 9) {
2535 DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2536 strncat(chipver_tag
, "_c0", strlen("_c0"));
2538 DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver
));
2542 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2543 defined(SUPPORT_BCM4359_MIXED_MODULES)
2544 module_type
= dhd_check_module_b90();
2546 switch (module_type
) {
2547 case BCM4359_MODULE_TYPE_B90B
:
2548 strcat(fw_path
, chipver_tag
);
2550 case BCM4359_MODULE_TYPE_B90S
:
2553 * .cid.info file not exist case,
2554 * loading B90S FW force for initial MFG boot up.
2556 if (chip_ver
== 5) {
2557 strncat(fw_path
, "_b90s", strlen("_b90s"));
2559 strcat(fw_path
, chipver_tag
);
2560 strcat(nv_path
, chipver_tag
);
2563 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2564 strcat(fw_path
, chipver_tag
);
2565 strcat(nv_path
, chipver_tag
);
2566 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2571 #if defined(USE_CID_CHECK)
2573 #define MAX_EXTENSION 20
2574 #define MODULE_BCM4361_INDEX 3
2575 #define CHIP_REV_A0 1
2576 #define CHIP_REV_A1 2
2577 #define CHIP_REV_B0 3
2578 #define CHIP_REV_B1 4
2579 #define CHIP_REV_B2 5
2580 #define CHIP_REV_C0 6
2581 #define BOARD_TYPE_EPA 0x080f
2582 #define BOARD_TYPE_IPA 0x0827
2583 #define BOARD_TYPE_IPA_OLD 0x081a
2584 #define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
2585 #define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
2586 #define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
2587 #define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
2588 #define MAX_VID_LEN 8
2589 #define CIS_TUPLE_HDR_LEN 2
2590 #if defined(BCM4375_CHIP)
2591 #define CIS_TUPLE_START_ADDRESS 0x18011120
2592 #define CIS_TUPLE_END_ADDRESS 0x18011177
2593 #elif defined(BCM4389_CHIP_DEF)
2594 /* 4389A0 CIS tuple start address is different with 4389B0
2595 * due to OTP layout is changed from 4389B0
2597 #define CIS_TUPLE_START_ADDRESS 0x1801113C
2598 #define CIS_TUPLE_END_ADDRESS 0x18011193
2599 #define CIS_TUPLE_START_ADDRESS_89B0 0x18011058
2600 #define CIS_TUPLE_END_ADDRESS_89B0 0x180110AF
2602 #define CIS_TUPLE_START_ADDRESS 0x18011110
2603 #define CIS_TUPLE_END_ADDRESS 0x18011167
2604 #endif /* defined(BCM4375_CHIP) */
2605 #define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
2606 + 1) / sizeof(uint32))
2607 #define CIS_TUPLE_TAG_START 0x80
2608 #define CIS_TUPLE_TAG_VENDOR 0x81
2609 #define CIS_TUPLE_TAG_BOARDTYPE 0x1b
2610 #define CIS_TUPLE_TAG_LENGTH 1
2611 #define NVRAM_FEM_MURATA "_murata"
2612 #define CID_FEM_MURATA "_mur_"
2614 typedef struct cis_tuple_format
{
2616 uint8 len
; /* total length of tag and data */
2619 } cis_tuple_format_t
;
2622 char cid_ext
[MAX_EXTENSION
];
2623 char nvram_ext
[MAX_EXTENSION
];
2624 char fw_ext
[MAX_EXTENSION
];
2627 naming_info_t bcm4361_naming_table
[] = {
2628 { {""}, {""}, {""} },
2629 { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
2630 { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
2631 { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
2632 { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
2633 { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
2634 { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
2635 { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
2636 { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
2637 { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
2638 { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
2639 { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
2640 { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
2641 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2642 { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2643 { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
2644 { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
2645 { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
2646 { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
2647 { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
2648 { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
2649 { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
2650 { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
2651 { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
2652 { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
2653 { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
2654 { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
2655 { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
2656 { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2657 { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2658 { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2659 { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2660 { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} }
2663 #define MODULE_BCM4375_INDEX 3
2665 naming_info_t bcm4375_naming_table
[] = {
2666 { {""}, {""}, {""} },
2667 { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
2668 { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
2669 { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
2670 { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
2671 { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
2672 { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
2673 { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
2674 { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
2675 { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
2676 { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
2677 { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
2678 { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
2679 { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
2680 { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
2681 { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
2682 { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
2683 { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
2684 { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
2685 { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
2688 #define MODULE_BCM4389_INDEX 3
2690 naming_info_t bcm4389_naming_table
[] = {
2691 { {""}, {""}, {""} },
2692 { {"e51_es11"}, {"_ES01_semco_a0"}, {"_a0"} },
2693 { {"e51_es12"}, {"_ES02_semco_a0"}, {"_a0"} },
2694 { {"e53_es23"}, {"_ES10_semco_b0"}, {"_b0"} },
2695 { {"1wk_es21"}, {"_1wk_es21_b0"}, {"_b0"} },
2696 { {"1wk_es30"}, {"_1wk_es30_b0"}, {"_b0"} },
2697 { {"1wk_es31"}, {"_1wk_es31_b0"}, {"_b0"} }
2700 #if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF)
2701 static naming_info_t
*
2702 dhd_find_naming_info(naming_info_t table
[], int table_size
, char *module_type
)
2704 int index_found
= 0, i
= 0;
2706 if (module_type
&& strlen(module_type
) > 0) {
2707 for (i
= 1; i
< table_size
; i
++) {
2708 if (!strncmp(table
[i
].cid_ext
, module_type
, strlen(table
[i
].cid_ext
))) {
2715 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__
, index_found
));
2717 return &table
[index_found
];
2720 static naming_info_t
*
2721 dhd_find_naming_info_by_cid(naming_info_t table
[], int table_size
,
2724 int index_found
= 0, i
= 0;
2727 /* truncate extension */
2728 for (i
= 1, ptr
= cid_info
; i
< MODULE_BCM4361_INDEX
&& ptr
; i
++) {
2729 ptr
= bcmstrstr(ptr
, "_");
2735 for (i
= 1; i
< table_size
&& ptr
; i
++) {
2736 if (!strncmp(table
[i
].cid_ext
, ptr
, strlen(table
[i
].cid_ext
))) {
2742 DHD_INFO(("%s: index_found=%d\n", __FUNCTION__
, index_found
));
2744 return &table
[index_found
];
2748 dhd_parse_board_information_bcm(dhd_bus_t
*bus
, int *boardtype
,
2749 unsigned char *vid
, int *vid_length
)
2751 int boardtype_backplane_addr
[] = {
2752 0x18010324, /* OTP Control 1 */
2753 0x18012618, /* PMU min resource mask */
2755 int boardtype_backplane_data
[] = {
2757 0x0e4fffff /* Keep on ARMHTAVAIL */
2759 int int_val
= 0, i
= 0;
2760 cis_tuple_format_t
*tuple
;
2762 uint32 raw_data
[CIS_TUPLE_MAX_COUNT
];
2763 uint32 cis_start_addr
= CIS_TUPLE_START_ADDRESS
;
2764 #ifdef BCM4389_CHIP_DEF
2765 uint chipid
= dhd_bus_chip_id(bus
->dhd
);
2766 uint revid
= dhd_bus_chiprev_id(bus
->dhd
);
2768 if ((BCM4389_CHIP_GRPID
== chipid
) && (revid
== 1)) {
2769 cis_start_addr
= CIS_TUPLE_START_ADDRESS_89B0
;
2771 DHD_INFO(("%s : chipid :%u, revid %u\n", __FUNCTION__
, chipid
, revid
));
2772 #endif /* BCM4389_CHIP_DEF */
2773 for (i
= 0; i
< ARRAYSIZE(boardtype_backplane_addr
); i
++) {
2774 /* Write new OTP and PMU configuration */
2775 if (si_backplane_access(bus
->sih
, boardtype_backplane_addr
[i
], sizeof(int),
2776 &boardtype_backplane_data
[i
], FALSE
) != BCME_OK
) {
2777 DHD_ERROR(("invalid size/addr combination\n"));
2781 if (si_backplane_access(bus
->sih
, boardtype_backplane_addr
[i
], sizeof(int),
2782 &int_val
, TRUE
) != BCME_OK
) {
2783 DHD_ERROR(("invalid size/addr combination\n"));
2787 DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2788 __FUNCTION__
, boardtype_backplane_addr
[i
], int_val
));
2791 /* read tuple raw data */
2792 for (i
= 0; i
< CIS_TUPLE_MAX_COUNT
; i
++) {
2793 if (si_backplane_access(bus
->sih
, cis_start_addr
+ i
* sizeof(uint32
),
2794 sizeof(uint32
), &raw_data
[i
], TRUE
) != BCME_OK
) {
2797 DHD_INFO(("%s: tuple index %d, raw data 0x%08x\n", __FUNCTION__
, i
, raw_data
[i
]));
2800 totlen
= i
* sizeof(uint32
);
2801 tuple
= (cis_tuple_format_t
*)raw_data
;
2803 /* check the first tuple has tag 'start' */
2804 if (tuple
->id
!= CIS_TUPLE_TAG_START
) {
2805 DHD_ERROR(("%s: Can not find the TAG\n", __FUNCTION__
));
2809 *vid_length
= *boardtype
= 0;
2811 /* find tagged parameter */
2812 while ((totlen
>= (tuple
->len
+ CIS_TUPLE_HDR_LEN
)) &&
2813 (*vid_length
== 0 || *boardtype
== 0)) {
2816 if ((tuple
->tag
== CIS_TUPLE_TAG_VENDOR
) &&
2817 (totlen
>= (int)(len
+ CIS_TUPLE_HDR_LEN
))) {
2819 memcpy(vid
, tuple
->data
, tuple
->len
- CIS_TUPLE_TAG_LENGTH
);
2820 *vid_length
= tuple
->len
- CIS_TUPLE_TAG_LENGTH
;
2821 prhex("OTP VID", tuple
->data
, tuple
->len
- CIS_TUPLE_TAG_LENGTH
);
2823 else if ((tuple
->tag
== CIS_TUPLE_TAG_BOARDTYPE
) &&
2824 (totlen
>= (int)(len
+ CIS_TUPLE_HDR_LEN
))) {
2825 /* found boardtype */
2826 *boardtype
= (int)tuple
->data
[0];
2827 prhex("OTP boardtype", tuple
->data
, tuple
->len
- CIS_TUPLE_TAG_LENGTH
);
2830 tuple
= (cis_tuple_format_t
*)((uint8
*)tuple
+ (len
+ CIS_TUPLE_HDR_LEN
));
2831 totlen
-= (len
+ CIS_TUPLE_HDR_LEN
);
2834 if (*vid_length
<= 0 || *boardtype
<= 0) {
2835 DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2836 *vid_length
, *boardtype
));
2844 static naming_info_t
*
2845 dhd_find_naming_info_by_chip_rev(naming_info_t table
[], int table_size
,
2846 dhd_bus_t
*bus
, bool *is_murata_fem
)
2848 int board_type
= 0, chip_rev
= 0, vid_length
= 0;
2849 unsigned char vid
[MAX_VID_LEN
];
2850 naming_info_t
*info
= &table
[0];
2851 char *cid_info
= NULL
;
2853 if (!bus
|| !bus
->sih
) {
2854 DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__
, bus
));
2857 chip_rev
= bus
->sih
->chiprev
;
2859 if (dhd_parse_board_information_bcm(bus
, &board_type
, vid
, &vid_length
)
2861 DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__
));
2865 DHD_INFO(("%s:chip version %d\n", __FUNCTION__
, chip_rev
));
2867 #if defined(BCM4361_CHIP)
2868 /* A0 chipset has exception only */
2869 if (chip_rev
== CHIP_REV_A0
) {
2870 if (board_type
== BOARD_TYPE_EPA
) {
2871 info
= dhd_find_naming_info(table
, table_size
,
2872 DEFAULT_CIDINFO_FOR_EPA
);
2873 } else if ((board_type
== BOARD_TYPE_IPA
) ||
2874 (board_type
== BOARD_TYPE_IPA_OLD
)) {
2875 info
= dhd_find_naming_info(table
, table_size
,
2876 DEFAULT_CIDINFO_FOR_IPA
);
2879 cid_info
= dhd_get_cid_info(vid
, vid_length
);
2881 info
= dhd_find_naming_info_by_cid(table
, table_size
, cid_info
);
2882 if (strstr(cid_info
, CID_FEM_MURATA
)) {
2883 *is_murata_fem
= TRUE
;
2888 cid_info
= dhd_get_cid_info(vid
, vid_length
);
2890 info
= dhd_find_naming_info_by_cid(table
, table_size
, cid_info
);
2891 if (strstr(cid_info
, CID_FEM_MURATA
)) {
2892 *is_murata_fem
= TRUE
;
2895 #endif /* BCM4361_CHIP */
2899 #endif /* BCM4361_CHIP || BCM4375_CHIP || BCM4389_CHIP_DEF */
2900 #endif /* USE_CID_CHECK */
2903 concate_revision_bcm4361(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2906 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2907 char module_type
[MAX_VNAME_LEN
];
2908 naming_info_t
*info
= NULL
;
2909 bool is_murata_fem
= FALSE
;
2911 memset(module_type
, 0, sizeof(module_type
));
2913 if (dhd_check_module_bcm(module_type
,
2914 MODULE_BCM4361_INDEX
, &is_murata_fem
) == BCME_OK
) {
2915 info
= dhd_find_naming_info(bcm4361_naming_table
,
2916 ARRAYSIZE(bcm4361_naming_table
), module_type
);
2918 /* in case of .cid.info doesn't exists */
2919 info
= dhd_find_naming_info_by_chip_rev(bcm4361_naming_table
,
2920 ARRAYSIZE(bcm4361_naming_table
), bus
, &is_murata_fem
);
2923 if (bcmstrnstr(nv_path
, PATH_MAX
, "_murata", 7)) {
2924 is_murata_fem
= FALSE
;
2928 if (is_murata_fem
) {
2929 strncat(nv_path
, NVRAM_FEM_MURATA
, strlen(NVRAM_FEM_MURATA
));
2931 strncat(nv_path
, info
->nvram_ext
, strlen(info
->nvram_ext
));
2932 strncat(fw_path
, info
->fw_ext
, strlen(info
->fw_ext
));
2934 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__
));
2937 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2938 char chipver_tag
[10] = {0, };
2940 strcat(fw_path
, chipver_tag
);
2941 strcat(nv_path
, chipver_tag
);
2942 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2948 concate_revision_bcm4375(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2951 #if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
2952 char module_type
[MAX_VNAME_LEN
];
2953 naming_info_t
*info
= NULL
;
2954 bool is_murata_fem
= FALSE
;
2956 memset(module_type
, 0, sizeof(module_type
));
2958 if (dhd_check_module_bcm(module_type
,
2959 MODULE_BCM4375_INDEX
, &is_murata_fem
) == BCME_OK
) {
2960 info
= dhd_find_naming_info(bcm4375_naming_table
,
2961 ARRAYSIZE(bcm4375_naming_table
), module_type
);
2963 /* in case of .cid.info doesn't exists */
2964 info
= dhd_find_naming_info_by_chip_rev(bcm4375_naming_table
,
2965 ARRAYSIZE(bcm4375_naming_table
), bus
, &is_murata_fem
);
2969 strncat(nv_path
, info
->nvram_ext
, strlen(info
->nvram_ext
));
2970 strncat(fw_path
, info
->fw_ext
, strlen(info
->fw_ext
));
2972 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__
));
2975 #else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2976 char chipver_tag
[10] = {0, };
2978 strcat(fw_path
, chipver_tag
);
2979 strcat(nv_path
, chipver_tag
);
2980 #endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2986 concate_revision_bcm4389(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
2989 #if defined(SUPPORT_BCM4389_MIXED_MODULES) && defined(USE_CID_CHECK)
2990 char module_type
[MAX_VNAME_LEN
];
2991 naming_info_t
*info
= NULL
;
2992 bool is_murata_fem
= FALSE
;
2994 memset(module_type
, 0, sizeof(module_type
));
2996 if (dhd_check_module_bcm(module_type
,
2997 MODULE_BCM4389_INDEX
, &is_murata_fem
) == BCME_OK
) {
2998 info
= dhd_find_naming_info(bcm4389_naming_table
,
2999 ARRAYSIZE(bcm4389_naming_table
), module_type
);
3001 /* in case of .cid.info doesn't exists */
3002 info
= dhd_find_naming_info_by_chip_rev(bcm4389_naming_table
,
3003 ARRAYSIZE(bcm4389_naming_table
), bus
, &is_murata_fem
);
3007 strncat(nv_path
, info
->nvram_ext
, strlen(info
->nvram_ext
));
3008 strncat(fw_path
, info
->fw_ext
, strlen(info
->fw_ext
));
3010 DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__
));
3013 #else /* SUPPORT_BCM4389_MIXED_MODULES && USE_CID_CHECK */
3014 char chipver_tag
[10] = {0, };
3016 strcat(fw_path
, chipver_tag
);
3017 strcat(nv_path
, chipver_tag
);
3018 #endif /* SUPPORT_BCM4389_MIXED_MODULES && USE_CID_CHECK */
3023 concate_revision(dhd_bus_t
*bus
, char *fw_path
, char *nv_path
)
3027 if (!bus
|| !bus
->sih
) {
3028 DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__
));
3032 if (!fw_path
|| !nv_path
) {
3033 DHD_ERROR(("fw_path or nv_path is null.\n"));
3037 switch (si_chipid(bus
->sih
)) {
3039 case BCM43569_CHIP_ID
:
3040 case BCM4358_CHIP_ID
:
3041 res
= concate_revision_bcm4358(bus
, fw_path
, nv_path
);
3043 case BCM4355_CHIP_ID
:
3044 case BCM4359_CHIP_ID
:
3045 res
= concate_revision_bcm4359(bus
, fw_path
, nv_path
);
3047 case BCM4361_CHIP_ID
:
3048 case BCM4347_CHIP_ID
:
3049 res
= concate_revision_bcm4361(bus
, fw_path
, nv_path
);
3051 case BCM4375_CHIP_ID
:
3052 res
= concate_revision_bcm4375(bus
, fw_path
, nv_path
);
3054 case BCM4389_CHIP_ID
:
3055 res
= concate_revision_bcm4389(bus
, fw_path
, nv_path
);
3058 DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
3064 #endif /* SUPPORT_MULTIPLE_REVISION */
3067 dhd_get_chipid(struct dhd_bus
*bus
)
3069 if (bus
&& bus
->sih
) {
3070 return (uint16
)si_chipid(bus
->sih
);
3071 } else if (bus
&& bus
->regs
) {
3072 chipcregs_t
*cc
= (chipcregs_t
*)bus
->regs
;
3075 /* Set bar0 window to si_enum_base */
3076 dhdpcie_bus_cfg_set_bar0_win(bus
, si_enum_base(0));
3078 w
= R_REG(bus
->osh
, &cc
->chipid
);
3079 chipid
= w
& CID_ID_MASK
;
3081 return (uint16
)chipid
;
3088 * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
3090 * BCM_REQUEST_FW specific :
3091 * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
3092 * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
3094 * BCMEMBEDIMAGE specific:
3095 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3096 * file will be used instead.
3098 * @return BCME_OK on success
3101 dhd_bus_download_firmware(struct dhd_bus
*bus
, osl_t
*osh
,
3102 char *pfw_path
, char *pnv_path
,
3103 char *pclm_path
, char *pconf_path
)
3107 bus
->fw_path
= pfw_path
;
3108 bus
->nv_path
= pnv_path
;
3109 bus
->dhd
->clm_path
= pclm_path
;
3110 bus
->dhd
->conf_path
= pconf_path
;
3112 #if defined(SUPPORT_MULTIPLE_REVISION)
3113 if (concate_revision(bus
, bus
->fw_path
, bus
->nv_path
) != 0) {
3114 DHD_ERROR(("%s: fail to concatnate revison \n",
3116 /* Proceed if SUPPORT_MULTIPLE_CHIPS is enabled */
3117 #ifndef SUPPORT_MULTIPLE_CHIPS
3119 #endif /* !SUPPORT_MULTIPLE_CHIPS */
3121 #endif /* SUPPORT_MULTIPLE_REVISION */
3123 #if defined(DHD_BLOB_EXISTENCE_CHECK)
3124 dhd_set_blob_support(bus
->dhd
, bus
->fw_path
);
3125 #endif /* DHD_BLOB_EXISTENCE_CHECK */
3127 DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
3128 __FUNCTION__
, bus
->fw_path
, bus
->nv_path
));
3129 dhdpcie_dump_resource(bus
);
3131 ret
= dhdpcie_download_firmware(bus
, osh
);
3137 dhd_set_bus_params(struct dhd_bus
*bus
)
3139 if (bus
->dhd
->conf
->dhd_poll
>= 0) {
3140 bus
->poll
= bus
->dhd
->conf
->dhd_poll
;
3143 printf("%s: set polling mode %d\n", __FUNCTION__
, bus
->dhd
->conf
->dhd_poll
);
3148 * Loads firmware given by 'bus->fw_path' into PCIe dongle.
3150 * BCM_REQUEST_FW specific :
3151 * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
3152 * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
3154 * BCMEMBEDIMAGE specific:
3155 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3156 * file will be used instead.
3158 * @return BCME_OK on success
3161 dhdpcie_download_firmware(struct dhd_bus
*bus
, osl_t
*osh
)
3164 #if defined(BCM_REQUEST_FW)
3165 uint chipid
= bus
->sih
->chip
;
3166 uint revid
= bus
->sih
->chiprev
;
3167 char fw_path
[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
3168 char nv_path
[64]; /* path to nvram vars file */
3169 bus
->fw_path
= fw_path
;
3170 bus
->nv_path
= nv_path
;
3172 case BCM43570_CHIP_ID
:
3173 bcmstrncat(fw_path
, "43570", 5);
3176 bcmstrncat(fw_path
, "a0", 2);
3179 bcmstrncat(fw_path
, "a2", 2);
3182 DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__
,
3188 DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__
,
3192 /* load board specific nvram file */
3193 snprintf(bus
->nv_path
, sizeof(nv_path
), "%s.nvm", fw_path
);
3195 snprintf(bus
->fw_path
, sizeof(fw_path
), "%s-firmware.bin", fw_path
);
3196 #endif /* BCM_REQUEST_FW */
3198 DHD_OS_WAKE_LOCK(bus
->dhd
);
3200 dhd_conf_set_path_params(bus
->dhd
, bus
->fw_path
, bus
->nv_path
);
3201 dhd_set_bus_params(bus
);
3203 ret
= _dhdpcie_download_firmware(bus
);
3205 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
3207 } /* dhdpcie_download_firmware */
3210 * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
3211 * is updated with the event logging partitions within that file as well.
3213 * @param pfw_path Path to .bin or .bea file
3216 dhdpcie_download_code_file(struct dhd_bus
*bus
, char *pfw_path
)
3218 int bcmerror
= BCME_ERROR
;
3222 char *imgbuf
= NULL
; /**< XXX a file pointer, contradicting its name and type */
3223 uint8
*memblock
= NULL
, *memptr
= NULL
;
3224 uint8
*memptr_tmp
= NULL
; // terence: check downloaded firmware is correct
3225 int offset_end
= bus
->ramsize
;
3226 uint32 file_size
= 0, read_len
= 0;
3228 #if defined(DHD_FW_MEM_CORRUPTION)
3229 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
3230 dhd_tcm_test_enable
= TRUE
;
3232 dhd_tcm_test_enable
= FALSE
;
3234 #endif /* DHD_FW_MEM_CORRUPTION */
3235 DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__
, dhd_tcm_test_enable
));
3237 if (dhd_tcm_test_enable
&& !dhd_bus_tcm_test(bus
)) {
3238 DHD_ERROR(("dhd_bus_tcm_test failed\n"));
3239 bcmerror
= BCME_ERROR
;
3242 DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__
, pfw_path
));
3244 /* Should succeed in opening image if it is actually given through registry
3245 * entry or in module param.
3247 imgbuf
= dhd_os_open_image1(bus
->dhd
, pfw_path
);
3248 if (imgbuf
== NULL
) {
3249 printf("%s: Open firmware file failed %s\n", __FUNCTION__
, pfw_path
);
3253 file_size
= dhd_os_get_image_size(imgbuf
);
3255 DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__
));
3259 memptr
= memblock
= MALLOC(bus
->dhd
->osh
, MEMBLOCK
+ DHD_SDALIGN
);
3260 if (memblock
== NULL
) {
3261 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__
, MEMBLOCK
));
3262 bcmerror
= BCME_NOMEM
;
3265 if (dhd_msg_level
& DHD_TRACE_VAL
) {
3266 memptr_tmp
= MALLOC(bus
->dhd
->osh
, MEMBLOCK
+ DHD_SDALIGN
);
3267 if (memptr_tmp
== NULL
) {
3268 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__
, MEMBLOCK
));
3272 if ((uint32
)(uintptr
)memblock
% DHD_SDALIGN
) {
3273 memptr
+= (DHD_SDALIGN
- ((uint32
)(uintptr
)memblock
% DHD_SDALIGN
));
3276 /* check if CR4/CA7 */
3277 store_reset
= (si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0) ||
3278 si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0));
3279 /* Download image with MEMBLOCK size */
3280 while ((len
= dhd_os_get_image_block((char*)memptr
, MEMBLOCK
, imgbuf
))) {
3282 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__
, len
));
3283 bcmerror
= BCME_ERROR
;
3288 if (read_len
> file_size
) {
3289 DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
3290 " file_size=%u truncating len to %d \n", __FUNCTION__
,
3291 len
, read_len
, file_size
, (len
- (read_len
- file_size
))));
3292 len
-= (read_len
- file_size
);
3295 /* if address is 0, store the reset instruction to be written in 0 */
3297 ASSERT(offset
== 0);
3298 bus
->resetinstr
= *(((uint32
*)memptr
));
3299 /* Add start of RAM address to the address given by user */
3300 offset
+= bus
->dongle_ram_base
;
3301 offset_end
+= offset
;
3302 store_reset
= FALSE
;
3305 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, offset
, (uint8
*)memptr
, len
);
3307 DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
3308 __FUNCTION__
, bcmerror
, MEMBLOCK
, offset
));
3312 if (dhd_msg_level
& DHD_TRACE_VAL
) {
3313 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, offset
, memptr_tmp
, len
);
3315 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
3316 __FUNCTION__
, bcmerror
, MEMBLOCK
, offset
));
3319 if (memcmp(memptr_tmp
, memptr
, len
)) {
3320 DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__
));
3323 DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__
));
3327 if (offset
>= offset_end
) {
3328 DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
3329 __FUNCTION__
, offset
, offset_end
));
3330 bcmerror
= BCME_ERROR
;
3334 if (read_len
>= file_size
) {
3340 MFREE(bus
->dhd
->osh
, memblock
, MEMBLOCK
+ DHD_SDALIGN
);
3341 if (dhd_msg_level
& DHD_TRACE_VAL
) {
3343 MFREE(bus
->dhd
->osh
, memptr_tmp
, MEMBLOCK
+ DHD_SDALIGN
);
3348 dhd_os_close_image1(bus
->dhd
, imgbuf
);
3352 } /* dhdpcie_download_code_file */
3354 #ifdef CUSTOMER_HW4_DEBUG
3355 #define MIN_NVRAMVARS_SIZE 128
3356 #endif /* CUSTOMER_HW4_DEBUG */
3359 dhdpcie_download_nvram(struct dhd_bus
*bus
)
3361 int bcmerror
= BCME_ERROR
;
3363 char * memblock
= NULL
;
3366 bool nvram_file_exists
;
3367 bool nvram_uefi_exists
= FALSE
;
3368 bool local_alloc
= FALSE
;
3369 pnv_path
= bus
->nv_path
;
3371 nvram_file_exists
= ((pnv_path
!= NULL
) && (pnv_path
[0] != '\0'));
3373 /* First try UEFI */
3374 len
= MAX_NVRAMBUF_SIZE
;
3375 dhd_get_download_buffer(bus
->dhd
, NULL
, NVRAM
, &memblock
, (int *)&len
);
3377 /* If UEFI empty, then read from file system */
3378 if ((len
<= 0) || (memblock
== NULL
)) {
3380 if (nvram_file_exists
) {
3381 len
= MAX_NVRAMBUF_SIZE
;
3382 dhd_get_download_buffer(bus
->dhd
, pnv_path
, NVRAM
, &memblock
, (int *)&len
);
3383 if ((len
<= 0 || len
> MAX_NVRAMBUF_SIZE
)) {
3388 /* For SROM OTP no external file or UEFI required */
3392 nvram_uefi_exists
= TRUE
;
3395 DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__
, len
));
3397 if (len
> 0 && len
<= MAX_NVRAMBUF_SIZE
&& memblock
!= NULL
) {
3398 bufp
= (char *) memblock
;
3402 if (nvram_uefi_exists
|| nvram_file_exists
) {
3403 len
= process_nvram_vars(bufp
, len
);
3407 DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__
, len
));
3408 #ifdef CUSTOMER_HW4_DEBUG
3409 if (len
< MIN_NVRAMVARS_SIZE
) {
3410 DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
3412 bcmerror
= BCME_ERROR
;
3415 #endif /* CUSTOMER_HW4_DEBUG */
3418 len
+= 4 - (len
% 4);
3423 bcmerror
= dhdpcie_downloadvars(bus
, memblock
, len
+ 1);
3425 DHD_ERROR(("%s: error downloading vars: %d\n",
3426 __FUNCTION__
, bcmerror
));
3433 MFREE(bus
->dhd
->osh
, memblock
, MAX_NVRAMBUF_SIZE
);
3435 dhd_free_download_buffer(bus
->dhd
, memblock
, MAX_NVRAMBUF_SIZE
);
3443 * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
3445 * BCMEMBEDIMAGE specific:
3446 * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3447 * file will be used instead.
3451 _dhdpcie_download_firmware(struct dhd_bus
*bus
)
3455 bool embed
= FALSE
; /* download embedded firmware */
3456 bool dlok
= FALSE
; /* download firmware succeeded */
3458 /* Out immediately if no image to download */
3459 if ((bus
->fw_path
== NULL
) || (bus
->fw_path
[0] == '\0')) {
3460 DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__
));
3463 /* Keep arm in reset */
3464 if (dhdpcie_bus_download_state(bus
, TRUE
)) {
3465 DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__
));
3469 /* External image takes precedence if specified */
3470 if ((bus
->fw_path
!= NULL
) && (bus
->fw_path
[0] != '\0')) {
3471 if (dhdpcie_download_code_file(bus
, bus
->fw_path
)) {
3472 DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__
,
3481 BCM_REFERENCE(embed
);
3483 DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__
, __LINE__
));
3487 /* EXAMPLE: nvram_array */
3488 /* If a valid nvram_arry is specified as above, it can be passed down to dongle */
3489 /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3491 /* External nvram takes precedence if specified */
3492 if (dhdpcie_download_nvram(bus
)) {
3493 DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__
, __LINE__
));
3497 /* Take arm out of reset */
3498 if (dhdpcie_bus_download_state(bus
, FALSE
)) {
3499 DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__
));
3507 } /* _dhdpcie_download_firmware */
3510 dhdpcie_bus_readconsole(dhd_bus_t
*bus
)
3512 dhd_console_t
*c
= &bus
->console
;
3513 uint8 line
[CONSOLE_LINE_MAX
], ch
;
3514 uint32 n
, idx
, addr
;
3519 /* Don't do anything until FWREADY updates console address */
3520 if (bus
->console_addr
== 0)
3523 /* Read console log struct */
3524 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, log
);
3526 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)&c
->log
, sizeof(c
->log
))) < 0)
3529 /* Allocate console buffer (one time only) */
3530 if (c
->buf
== NULL
) {
3531 c
->bufsize
= ltoh32(c
->log
.buf_size
);
3532 if ((c
->buf
= MALLOC(bus
->dhd
->osh
, c
->bufsize
)) == NULL
)
3534 DHD_INFO(("conlog: bufsize=0x%x\n", c
->bufsize
));
3536 idx
= ltoh32(c
->log
.idx
);
3538 /* Protect against corrupt value */
3539 if (idx
> c
->bufsize
)
3542 /* Skip reading the console buffer if the index pointer has not moved */
3546 DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c
->log
.buf
,
3549 /* Read the console buffer data to a local buffer
3550 * optimize and read only the portion of the buffer needed, but
3551 * important to handle wrap-around. Read ptr is 'c->last',
3552 * write ptr is 'idx'
3554 addr
= ltoh32(c
->log
.buf
);
3556 /* wrap around case - write ptr < read ptr */
3557 if (idx
< c
->last
) {
3558 /* from read ptr to end of buffer */
3559 readlen
= c
->bufsize
- c
->last
;
3560 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3561 addr
+ c
->last
, c
->buf
, readlen
)) < 0) {
3562 DHD_ERROR(("conlog: read error[1] ! \n"));
3565 /* from beginning of buffer to write ptr */
3566 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3567 addr
, c
->buf
+ readlen
,
3569 DHD_ERROR(("conlog: read error[2] ! \n"));
3574 /* non-wraparound case, write ptr > read ptr */
3575 readlen
= (uint
)idx
- c
->last
;
3576 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
,
3577 addr
+ c
->last
, c
->buf
, readlen
)) < 0) {
3578 DHD_ERROR(("conlog: read error[3] ! \n"));
3582 /* update read ptr */
3585 /* now output the read data from the local buffer to the host console */
3586 while (i
< readlen
) {
3587 for (n
= 0; n
< CONSOLE_LINE_MAX
- 2 && i
< readlen
; n
++) {
3596 if (line
[n
- 1] == '\r')
3599 DHD_FWLOG(("CONSOLE: %s\n", line
));
3605 } /* dhdpcie_bus_readconsole */
3608 dhd_bus_dump_console_buffer(dhd_bus_t
*bus
)
3612 char *console_buffer
= NULL
;
3613 uint32 console_ptr
, console_size
, console_index
;
3614 uint8 line
[CONSOLE_LINE_MAX
], ch
;
3617 DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__
));
3619 if (bus
->is_linkdown
) {
3620 DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__
));
3624 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
);
3625 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3626 (uint8
*)&console_ptr
, sizeof(console_ptr
))) < 0) {
3630 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
.buf_size
);
3631 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3632 (uint8
*)&console_size
, sizeof(console_size
))) < 0) {
3636 addr
= bus
->pcie_sh
->console_addr
+ OFFSETOF(hnd_cons_t
, log
.idx
);
3637 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
,
3638 (uint8
*)&console_index
, sizeof(console_index
))) < 0) {
3642 console_ptr
= ltoh32(console_ptr
);
3643 console_size
= ltoh32(console_size
);
3644 console_index
= ltoh32(console_index
);
3646 if (console_size
> CONSOLE_BUFFER_MAX
||
3647 !(console_buffer
= MALLOC(bus
->dhd
->osh
, console_size
))) {
3651 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, console_ptr
,
3652 (uint8
*)console_buffer
, console_size
)) < 0) {
3656 for (i
= 0, n
= 0; i
< console_size
; i
+= n
+ 1) {
3657 for (n
= 0; n
< CONSOLE_LINE_MAX
- 2; n
++) {
3658 ch
= console_buffer
[(console_index
+ i
+ n
) % console_size
];
3665 if (line
[n
- 1] == '\r')
3668 /* Don't use DHD_ERROR macro since we print
3669 * a lot of information quickly. The macro
3670 * will truncate a lot of the printfs
3673 DHD_FWLOG(("CONSOLE: %s\n", line
));
3679 MFREE(bus
->dhd
->osh
, console_buffer
, console_size
);
3684 dhdpcie_schedule_log_dump(dhd_bus_t
*bus
)
3686 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
3687 log_dump_type_t
*flush_type
;
3689 /* flush_type is freed at do_dhd_log_dump function */
3690 flush_type
= MALLOCZ(bus
->dhd
->osh
, sizeof(log_dump_type_t
));
3692 *flush_type
= DLD_BUF_TYPE_ALL
;
3693 dhd_schedule_log_dump(bus
->dhd
, flush_type
);
3695 DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__
));
3697 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
3701 * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file.
3703 * @return BCME_OK on success
3706 dhdpcie_checkdied(dhd_bus_t
*bus
, char *data
, uint size
)
3710 char *mbuffer
= NULL
;
3711 uint maxstrlen
= 256;
3713 pciedev_shared_t
*local_pciedev_shared
= bus
->pcie_sh
;
3714 struct bcmstrbuf strbuf
;
3715 unsigned long flags
;
3716 bool dongle_trap_occured
= FALSE
;
3718 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
3720 if (DHD_NOCHECKDIED_ON()) {
3726 * Called after a rx ctrl timeout. "data" is NULL.
3727 * allocate memory to trace the trap or assert.
3730 mbuffer
= data
= MALLOC(bus
->dhd
->osh
, msize
);
3732 if (mbuffer
== NULL
) {
3733 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__
, msize
));
3734 bcmerror
= BCME_NOMEM
;
3739 if ((str
= MALLOC(bus
->dhd
->osh
, maxstrlen
)) == NULL
) {
3740 DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__
, maxstrlen
));
3741 bcmerror
= BCME_NOMEM
;
3744 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
3745 DHD_BUS_BUSY_SET_IN_CHECKDIED(bus
->dhd
);
3746 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
3748 if (MULTIBP_ENAB(bus
->sih
)) {
3749 dhd_bus_pcie_pwr_req(bus
);
3751 if ((bcmerror
= dhdpcie_readshared(bus
)) < 0) {
3755 bcm_binit(&strbuf
, data
, size
);
3757 bcm_bprintf(&strbuf
, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
3758 local_pciedev_shared
->msgtrace_addr
, local_pciedev_shared
->console_addr
);
3760 if ((local_pciedev_shared
->flags
& PCIE_SHARED_ASSERT_BUILT
) == 0) {
3761 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3762 * (Avoids conflict with real asserts for programmatic parsing of output.)
3764 bcm_bprintf(&strbuf
, "Assrt not built in dongle\n");
3767 if ((bus
->pcie_sh
->flags
& (PCIE_SHARED_ASSERT
|PCIE_SHARED_TRAP
)) == 0) {
3768 /* NOTE: Misspelled assert is intentional - DO NOT FIX.
3769 * (Avoids conflict with real asserts for programmatic parsing of output.)
3771 bcm_bprintf(&strbuf
, "No trap%s in dongle",
3772 (bus
->pcie_sh
->flags
& PCIE_SHARED_ASSERT_BUILT
)
3775 if (bus
->pcie_sh
->flags
& PCIE_SHARED_ASSERT
) {
3776 /* Download assert */
3777 bcm_bprintf(&strbuf
, "Dongle assert");
3778 if (bus
->pcie_sh
->assert_exp_addr
!= 0) {
3780 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3781 bus
->pcie_sh
->assert_exp_addr
,
3782 (uint8
*)str
, maxstrlen
)) < 0) {
3786 str
[maxstrlen
- 1] = '\0';
3787 bcm_bprintf(&strbuf
, " expr \"%s\"", str
);
3790 if (bus
->pcie_sh
->assert_file_addr
!= 0) {
3792 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3793 bus
->pcie_sh
->assert_file_addr
,
3794 (uint8
*)str
, maxstrlen
)) < 0) {
3798 str
[maxstrlen
- 1] = '\0';
3799 bcm_bprintf(&strbuf
, " file \"%s\"", str
);
3802 bcm_bprintf(&strbuf
, " line %d ", bus
->pcie_sh
->assert_line
);
3805 if (bus
->pcie_sh
->flags
& PCIE_SHARED_TRAP
) {
3806 trap_t
*tr
= &bus
->dhd
->last_trap_info
;
3807 dongle_trap_occured
= TRUE
;
3808 if ((bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
3809 bus
->pcie_sh
->trap_addr
, (uint8
*)tr
, sizeof(trap_t
))) < 0) {
3810 bus
->dhd
->dongle_trap_occured
= TRUE
;
3813 dhd_bus_dump_trap_info(bus
, &strbuf
);
3817 if (bus
->pcie_sh
->flags
& (PCIE_SHARED_ASSERT
| PCIE_SHARED_TRAP
)) {
3818 DHD_ERROR(("%s: %s\n", __FUNCTION__
, strbuf
.origbuf
));
3820 /* wake up IOCTL wait event */
3821 dhd_wakeup_ioctl_event(bus
->dhd
, IOCTL_RETURN_ON_TRAP
);
3823 dhd_bus_dump_console_buffer(bus
);
3824 dhd_prot_debug_info_print(bus
->dhd
);
3826 #if defined(DHD_FW_COREDUMP)
3827 /* save core dump or write to a file */
3828 if (bus
->dhd
->memdump_enabled
) {
3829 #ifdef DHD_SSSR_DUMP
3830 DHD_ERROR(("%s : Set collect_sssr as TRUE\n", __FUNCTION__
));
3831 bus
->dhd
->collect_sssr
= TRUE
;
3832 #endif /* DHD_SSSR_DUMP */
3833 #ifdef DHD_SDTC_ETB_DUMP
3834 DHD_ERROR(("%s : Set collect_sdtc as TRUE\n", __FUNCTION__
));
3835 bus
->dhd
->collect_sdtc
= TRUE
;
3836 #endif /* DHD_SDTC_ETB_DUMP */
3837 bus
->dhd
->memdump_type
= DUMP_TYPE_DONGLE_TRAP
;
3838 dhdpcie_mem_dump(bus
);
3840 #endif /* DHD_FW_COREDUMP */
3842 /* set the trap occured flag only after all the memdump,
3843 * logdump and sssr dump collection has been scheduled
3845 if (dongle_trap_occured
) {
3846 bus
->dhd
->dongle_trap_occured
= TRUE
;
3849 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3850 copy_hang_info_trap(bus
->dhd
);
3851 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3853 dhd_schedule_reset(bus
->dhd
);
3859 /* dhdpcie_checkdied is invoked only when dongle has trapped
3860 * or after PCIe link down..etc. so set dongle_trap_occured so that
3861 * log_dump logic can rely on only one flag dongle_trap_occured.
3863 bus
->dhd
->dongle_trap_occured
= TRUE
;
3864 dhdpcie_schedule_log_dump(bus
);
3866 if (MULTIBP_ENAB(bus
->sih
)) {
3867 dhd_bus_pcie_pwr_req_clear(bus
);
3870 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
3871 DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus
->dhd
);
3872 dhd_os_busbusy_wake(bus
->dhd
);
3873 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
3876 MFREE(bus
->dhd
->osh
, mbuffer
, msize
);
3878 MFREE(bus
->dhd
->osh
, str
, maxstrlen
);
3881 } /* dhdpcie_checkdied */
3883 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3884 void dhdpcie_mem_dump_bugcheck(dhd_bus_t
*bus
, uint8
*buf
)
3887 int size
; /* Full mem size */
3888 int start
; /* Start address */
3889 int read_size
= 0; /* Read size of each iteration */
3890 uint8
*databuf
= buf
;
3896 start
= bus
->dongle_ram_base
;
3898 /* check for dead bus */
3901 ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, (uint8
*)&test_word
, read_size
);
3902 /* if read error or bus timeout */
3903 if (ret
|| (test_word
== 0xFFFFFFFF)) {
3908 /* Get full mem size */
3909 size
= bus
->ramsize
;
3910 /* Read mem content */
3913 read_size
= MIN(MEMBLOCK
, size
);
3914 if ((ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, databuf
, read_size
))) {
3918 /* Decrement size and increment start address */
3921 databuf
+= read_size
;
3923 bus
->dhd
->soc_ram
= buf
;
3924 bus
->dhd
->soc_ram_length
= bus
->ramsize
;
3928 #if defined(DHD_FW_COREDUMP)
3930 dhdpcie_get_mem_dump(dhd_bus_t
*bus
)
3935 int read_size
= 0; /* Read size of each iteration */
3936 uint8
*p_buf
= NULL
, *databuf
= NULL
;
3937 unsigned long flags_bus
;
3940 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
3945 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
3949 size
= bus
->ramsize
; /* Full mem size */
3950 start
= bus
->dongle_ram_base
; /* Start address */
3952 /* Get full mem size */
3953 p_buf
= dhd_get_fwdump_buf(bus
->dhd
, size
);
3955 DHD_ERROR(("%s: Out of memory (%d bytes)\n",
3956 __FUNCTION__
, size
));
3960 /* Read mem content */
3961 DHD_TRACE_HW4(("Dump dongle memory\n"));
3964 /* Hold BUS_LP_STATE_LOCK to avoid simultaneous bus access */
3965 DHD_BUS_LP_STATE_LOCK(bus
->bus_lp_state_lock
, flags_bus
);
3967 read_size
= MIN(MEMBLOCK
, size
);
3968 ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, databuf
, read_size
);
3970 DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__
, ret
));
3971 #ifdef DHD_DEBUG_UART
3972 bus
->dhd
->memdump_success
= FALSE
;
3973 #endif /* DHD_DEBUG_UART */
3978 /* Decrement size and increment start address */
3981 databuf
+= read_size
;
3983 DHD_BUS_LP_STATE_UNLOCK(bus
->bus_lp_state_lock
, flags_bus
);
3989 dhdpcie_mem_dump(dhd_bus_t
*bus
)
3993 uint32 dhd_console_ms_prev
= 0;
3997 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
4000 dhd_console_ms_prev
= dhdp
->dhd_console_ms
;
4001 if (dhd_console_ms_prev
) {
4002 DHD_ERROR(("%s: Disabling console msgs(0x%d) before mem dump to local buf\n",
4003 __FUNCTION__
, dhd_console_ms_prev
));
4004 dhdp
->dhd_console_ms
= 0;
4007 #ifdef EXYNOS_PCIE_DEBUG
4008 exynos_pcie_register_dump(1);
4009 #endif /* EXYNOS_PCIE_DEBUG */
4011 #ifdef SUPPORT_LINKDOWN_RECOVERY
4012 if (bus
->is_linkdown
) {
4013 DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__
));
4014 /* panic only for DUMP_MEMFILE_BUGON */
4015 ASSERT(bus
->dhd
->memdump_enabled
!= DUMP_MEMFILE_BUGON
);
4018 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4020 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp
)) {
4021 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__
));
4025 /* Induce DB7 trap for below non-trap cases */
4026 switch (dhdp
->memdump_type
) {
4027 case DUMP_TYPE_RESUMED_ON_TIMEOUT
:
4028 /* intentional fall through */
4029 case DUMP_TYPE_D3_ACK_TIMEOUT
:
4030 /* intentional fall through */
4031 case DUMP_TYPE_PKTID_AUDIT_FAILURE
:
4032 /* intentional fall through */
4033 case DUMP_TYPE_PKTID_INVALID
:
4034 /* intentional fall through */
4035 case DUMP_TYPE_SCAN_TIMEOUT
:
4036 /* intentional fall through */
4037 case DUMP_TYPE_SCAN_BUSY
:
4038 /* intentional fall through */
4039 case DUMP_TYPE_BY_LIVELOCK
:
4040 /* intentional fall through */
4041 case DUMP_TYPE_IFACE_OP_FAILURE
:
4042 /* intentional fall through */
4043 case DUMP_TYPE_PKTID_POOL_DEPLETED
:
4044 /* intentional fall through */
4045 case DUMP_TYPE_ESCAN_SYNCID_MISMATCH
:
4046 if (dhdp
->db7_trap
.fw_db7w_trap
) {
4047 /* Set fw_db7w_trap_inprogress here and clear from DPC */
4048 dhdp
->db7_trap
.fw_db7w_trap_inprogress
= TRUE
;
4049 dhdpcie_fw_trap(dhdp
->bus
);
4050 OSL_DELAY(100 * 1000); // wait 100 msec
4052 DHD_ERROR(("%s: DB7 Not supported!!!\n",
4060 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4061 if (pm_runtime_get_sync(dhd_bus_to_dev(bus
)) < 0)
4063 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4065 ret
= dhdpcie_get_mem_dump(bus
);
4067 DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
4068 __FUNCTION__
, ret
));
4071 #ifdef DHD_DEBUG_UART
4072 bus
->dhd
->memdump_success
= TRUE
;
4073 #endif /* DHD_DEBUG_UART */
4075 if (dhd_console_ms_prev
) {
4076 DHD_ERROR(("%s: enable console msgs(0x%d) after collecting memdump to local buf\n",
4077 __FUNCTION__
, dhd_console_ms_prev
));
4078 dhdp
->dhd_console_ms
= dhd_console_ms_prev
;
4081 dhd_schedule_memdump(dhdp
, dhdp
->soc_ram
, dhdp
->soc_ram_length
);
4082 /* buf, actually soc_ram free handled in dhd_{free,clear} */
4084 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
4085 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
4086 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus
));
4087 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
4093 dhd_bus_get_mem_dump(dhd_pub_t
*dhdp
)
4096 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__
));
4100 return dhdpcie_get_mem_dump(dhdp
->bus
);
4104 dhd_bus_mem_dump(dhd_pub_t
*dhdp
)
4106 dhd_bus_t
*bus
= dhdp
->bus
;
4107 int ret
= BCME_ERROR
;
4109 if (dhdp
->busstate
== DHD_BUS_DOWN
) {
4110 DHD_ERROR(("%s bus is down\n", __FUNCTION__
));
4114 /* Try to resume if already suspended or suspend in progress */
4115 #ifdef DHD_PCIE_RUNTIMEPM
4116 dhdpcie_runtime_bus_wake(dhdp
, CAN_SLEEP(), __builtin_return_address(0));
4117 #endif /* DHD_PCIE_RUNTIMEPM */
4119 /* Skip if still in suspended or suspend in progress */
4120 if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp
)) {
4121 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
4122 __FUNCTION__
, dhdp
->busstate
, dhdp
->dhd_bus_busy_state
));
4126 DHD_OS_WAKE_LOCK(dhdp
);
4127 ret
= dhdpcie_mem_dump(bus
);
4128 DHD_OS_WAKE_UNLOCK(dhdp
);
4131 #endif /* DHD_FW_COREDUMP */
4134 dhd_socram_dump(dhd_bus_t
*bus
)
4136 #if defined(DHD_FW_COREDUMP)
4137 DHD_OS_WAKE_LOCK(bus
->dhd
);
4138 dhd_bus_mem_dump(bus
->dhd
);
4139 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
4147 * Transfers bytes from host to dongle using pio mode.
4148 * Parameter 'address' is a backplane address.
4151 dhdpcie_bus_membytes(dhd_bus_t
*bus
, bool write
, ulong address
, uint8
*data
, uint size
)
4154 int detect_endian_flag
= 0x01;
4157 if (write
&& bus
->is_linkdown
) {
4158 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
4162 if (MULTIBP_ENAB(bus
->sih
)) {
4163 dhd_bus_pcie_pwr_req(bus
);
4165 /* Detect endianness. */
4166 little_endian
= *(char *)&detect_endian_flag
;
4168 /* In remap mode, adjust address beyond socram and redirect
4169 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
4170 * is not backplane accessible
4173 /* Determine initial transfer parameters */
4174 #ifdef DHD_SUPPORT_64BIT
4175 dsize
= sizeof(uint64
);
4176 #else /* !DHD_SUPPORT_64BIT */
4177 dsize
= sizeof(uint32
);
4178 #endif /* DHD_SUPPORT_64BIT */
4180 /* Do the transfer(s) */
4181 DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
4182 __FUNCTION__
, (write
? "write" : "read"), size
, address
));
4185 #ifdef DHD_SUPPORT_64BIT
4186 if (size
>= sizeof(uint64
) && little_endian
&& !(address
% 8)) {
4187 dhdpcie_bus_wtcm64(bus
, address
, *((uint64
*)data
));
4189 #else /* !DHD_SUPPORT_64BIT */
4190 if (size
>= sizeof(uint32
) && little_endian
&& !(address
% 4)) {
4191 dhdpcie_bus_wtcm32(bus
, address
, *((uint32
*)data
));
4193 #endif /* DHD_SUPPORT_64BIT */
4195 dsize
= sizeof(uint8
);
4196 dhdpcie_bus_wtcm8(bus
, address
, *data
);
4199 /* Adjust for next transfer (if any) */
4200 if ((size
-= dsize
)) {
4207 #ifdef DHD_SUPPORT_64BIT
4208 if (size
>= sizeof(uint64
) && little_endian
&& !(address
% 8))
4210 *(uint64
*)data
= dhdpcie_bus_rtcm64(bus
, address
);
4212 #else /* !DHD_SUPPORT_64BIT */
4213 if (size
>= sizeof(uint32
) && little_endian
&& !(address
% 4))
4215 *(uint32
*)data
= dhdpcie_bus_rtcm32(bus
, address
);
4217 #endif /* DHD_SUPPORT_64BIT */
4219 dsize
= sizeof(uint8
);
4220 *data
= dhdpcie_bus_rtcm8(bus
, address
);
4223 /* Adjust for next transfer (if any) */
4224 if ((size
-= dsize
) > 0) {
4230 if (MULTIBP_ENAB(bus
->sih
)) {
4231 dhd_bus_pcie_pwr_req_clear(bus
);
4234 } /* dhdpcie_bus_membytes */
4237 * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
4238 * to the (non flow controlled) flow ring.
4241 BCMFASTPATH(dhd_bus_schedule_queue
)(struct dhd_bus
*bus
, uint16 flow_id
, bool txs
)
4242 /** XXX function name could be more descriptive, eg use 'tx' and 'flow ring' in name */
4244 flow_ring_node_t
*flow_ring_node
;
4246 #ifdef DHD_LOSSLESS_ROAMING
4247 dhd_pub_t
*dhdp
= bus
->dhd
;
4250 DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__
, flow_id
));
4252 /* ASSERT on flow_id */
4253 if (flow_id
>= bus
->max_submission_rings
) {
4254 DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__
,
4255 flow_id
, bus
->max_submission_rings
));
4259 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flow_id
);
4261 if (flow_ring_node
->prot_info
== NULL
) {
4262 DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__
));
4263 return BCME_NOTREADY
;
4266 #ifdef DHD_LOSSLESS_ROAMING
4267 if ((dhdp
->dequeue_prec_map
& (1 << flow_ring_node
->flow_info
.tid
)) == 0) {
4268 DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
4269 __FUNCTION__
, flow_ring_node
->flow_info
.tid
));
4272 #endif /* DHD_LOSSLESS_ROAMING */
4275 unsigned long flags
;
4277 flow_queue_t
*queue
;
4278 #ifdef DHD_LOSSLESS_ROAMING
4279 struct ether_header
*eh
;
4281 #endif /* DHD_LOSSLESS_ROAMING */
4283 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
4285 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
4287 if (flow_ring_node
->status
!= FLOW_RING_STATUS_OPEN
) {
4288 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4289 return BCME_NOTREADY
;
4292 while ((txp
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
4293 if (bus
->dhd
->conf
->orphan_move
<= 1)
4294 PKTORPHAN(txp
, bus
->dhd
->conf
->tsq
);
4297 * Modifying the packet length caused P2P cert failures.
4298 * Specifically on test cases where a packet of size 52 bytes
4299 * was injected, the sniffer capture showed 62 bytes because of
4300 * which the cert tests failed. So making the below change
4301 * only Router specific.
4304 #ifdef DHDTCPACK_SUPPRESS
4305 if (bus
->dhd
->tcpack_sup_mode
!= TCPACK_SUP_HOLD
) {
4306 ret
= dhd_tcpack_check_xmit(bus
->dhd
, txp
);
4307 if (ret
!= BCME_OK
) {
4308 DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
4312 #endif /* DHDTCPACK_SUPPRESS */
4313 #ifdef DHD_LOSSLESS_ROAMING
4314 pktdata
= (uint8
*)PKTDATA(OSH_NULL
, txp
);
4315 eh
= (struct ether_header
*) pktdata
;
4316 if (eh
->ether_type
== hton16(ETHER_TYPE_802_1X
)) {
4317 uint8 prio
= (uint8
)PKTPRIO(txp
);
4318 /* Restore to original priority for 802.1X packet */
4319 if (prio
== PRIO_8021D_NC
) {
4320 PKTSETPRIO(txp
, dhdp
->prio_8021x
);
4323 #endif /* DHD_LOSSLESS_ROAMING */
4324 /* Attempt to transfer packet over flow ring */
4325 /* XXX: ifidx is wrong */
4326 ret
= dhd_prot_txdata(bus
->dhd
, txp
, flow_ring_node
->flow_info
.ifindex
);
4327 if (ret
!= BCME_OK
) { /* may not have resources in flow ring */
4328 DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__
, ret
));
4329 dhd_prot_txdata_write_flush(bus
->dhd
, flow_id
);
4330 /* reinsert at head */
4331 dhd_flow_queue_reinsert(bus
->dhd
, queue
, txp
);
4332 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4334 /* If we are able to requeue back, return success */
4338 #ifdef DHD_MEM_STATS
4339 DHD_MEM_STATS_LOCK(bus
->dhd
->mem_stats_lock
, flags
);
4340 bus
->dhd
->txpath_mem
+= PKTLEN(bus
->dhd
->osh
, txp
);
4341 DHD_INFO(("%s txpath_mem: %llu PKTLEN: %d\n",
4342 __FUNCTION__
, bus
->dhd
->txpath_mem
, PKTLEN(bus
->dhd
->osh
, txp
)));
4343 DHD_MEM_STATS_UNLOCK(bus
->dhd
->mem_stats_lock
, flags
);
4344 #endif /* DHD_MEM_STATS */
4347 dhd_prot_txdata_write_flush(bus
->dhd
, flow_id
);
4348 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4352 } /* dhd_bus_schedule_queue */
4354 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
4356 BCMFASTPATH(dhd_bus_txdata
)(struct dhd_bus
*bus
, void *txp
, uint8 ifidx
)
4359 #ifdef IDLE_TX_FLOW_MGMT
4361 #endif /* IDLE_TX_FLOW_MGMT */
4362 flow_queue_t
*queue
;
4363 flow_ring_node_t
*flow_ring_node
;
4364 unsigned long flags
;
4366 void *txp_pend
= NULL
;
4368 if (!bus
->dhd
->flowid_allocator
) {
4369 DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__
));
4373 flowid
= DHD_PKT_GET_FLOWID(txp
);
4375 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
4377 DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
4378 __FUNCTION__
, flowid
, flow_ring_node
->status
, flow_ring_node
->active
));
4380 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
4381 if ((flowid
> bus
->dhd
->max_tx_flowid
) ||
4382 #ifdef IDLE_TX_FLOW_MGMT
4383 (!flow_ring_node
->active
))
4385 (!flow_ring_node
->active
) ||
4386 (flow_ring_node
->status
== FLOW_RING_STATUS_DELETE_PENDING
) ||
4387 (flow_ring_node
->status
== FLOW_RING_STATUS_STA_FREEING
))
4388 #endif /* IDLE_TX_FLOW_MGMT */
4390 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4391 DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
4392 __FUNCTION__
, flowid
, flow_ring_node
->status
,
4393 flow_ring_node
->active
));
4398 #ifdef IDLE_TX_FLOW_MGMT
4399 node_status
= flow_ring_node
->status
;
4401 /* handle diffrent status states here!! */
4402 switch (node_status
)
4404 case FLOW_RING_STATUS_OPEN
:
4406 if (bus
->enable_idle_flowring_mgmt
) {
4407 /* Move the node to the head of active list */
4408 dhd_flow_ring_move_to_active_list_head(bus
, flow_ring_node
);
4412 case FLOW_RING_STATUS_SUSPENDED
:
4413 DHD_INFO(("Need to Initiate TX Flow resume\n"));
4414 /* Issue resume_ring request */
4415 dhd_bus_flow_ring_resume_request(bus
,
4419 case FLOW_RING_STATUS_CREATE_PENDING
:
4420 case FLOW_RING_STATUS_RESUME_PENDING
:
4421 /* Dont do anything here!! */
4422 DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
4426 case FLOW_RING_STATUS_DELETE_PENDING
:
4428 DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
4429 flowid
, node_status
));
4432 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4435 /* Now queue the packet */
4436 #endif /* IDLE_TX_FLOW_MGMT */
4438 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
4440 if ((ret
= dhd_flow_queue_enqueue(bus
->dhd
, queue
, txp
)) != BCME_OK
)
4443 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4445 if (flow_ring_node
->status
) {
4446 DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
4447 __FUNCTION__
, flowid
, flow_ring_node
->status
,
4448 flow_ring_node
->active
));
4455 ret
= dhd_bus_schedule_queue(bus
, flowid
, FALSE
); /* from queue to flowring */
4457 /* If we have anything pending, try to push into q */
4459 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
4461 if ((ret
= dhd_flow_queue_enqueue(bus
->dhd
, queue
, txp_pend
)) != BCME_OK
) {
4462 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4467 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
4473 DHD_INFO(("%s: Toss %d\n", __FUNCTION__
, ret
));
4474 PKTCFREE(bus
->dhd
->osh
, txp
, TRUE
);
4476 } /* dhd_bus_txdata */
4479 dhd_bus_stop_queue(struct dhd_bus
*bus
)
4481 dhd_txflowcontrol(bus
->dhd
, ALL_INTERFACES
, ON
);
4485 dhd_bus_start_queue(struct dhd_bus
*bus
)
4488 * Tx queue has been stopped due to resource shortage (or)
4489 * bus is not in a state to turn on.
4491 * Note that we try to re-start network interface only
4492 * when we have enough resources, one has to first change the
4493 * flag indicating we have all the resources.
4495 if (dhd_prot_check_tx_resource(bus
->dhd
)) {
4496 DHD_ERROR(("%s: Interface NOT started, previously stopped "
4497 "due to resource shortage\n", __FUNCTION__
));
4500 dhd_txflowcontrol(bus
->dhd
, ALL_INTERFACES
, OFF
);
4503 /* Device console input function */
4504 int dhd_bus_console_in(dhd_pub_t
*dhd
, uchar
*msg
, uint msglen
)
4506 dhd_bus_t
*bus
= dhd
->bus
;
4510 unsigned long flags
= 0;
4511 #endif /* PCIE_INB_DW */
4513 /* Address could be zero if CONSOLE := 0 in dongle Makefile */
4514 if (bus
->console_addr
== 0)
4515 return BCME_UNSUPPORTED
;
4517 /* Don't allow input if dongle is in reset */
4518 if (bus
->dhd
->dongle_reset
) {
4519 return BCME_NOTREADY
;
4522 /* Zero cbuf_index */
4523 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, cbuf_idx
);
4524 /* handle difference in definition of hnd_log_t in certain branches */
4525 if (dhd
->wlc_ver_major
< 14) {
4526 addr
-= (uint32
)sizeof(uint32
);
4529 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
))) < 0)
4532 /* Write message into cbuf */
4533 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, cbuf
);
4534 /* handle difference in definition of hnd_log_t in certain branches */
4535 if (dhd
->wlc_ver_major
< 14) {
4536 addr
-= sizeof(uint32
);
4538 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)msg
, msglen
)) < 0)
4541 /* Write length into vcons_in */
4542 addr
= bus
->console_addr
+ OFFSETOF(hnd_cons_t
, vcons_in
);
4543 val
= htol32(msglen
);
4544 if ((rv
= dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
))) < 0)
4548 /* Use a lock to ensure this tx DEVICE_WAKE + tx H2D_HOST_CONS_INT sequence is
4549 * mutually exclusive with the rx D2H_DEV_DS_ENTER_REQ + tx H2D_HOST_DS_ACK sequence.
4551 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
4552 #endif /* PCIE_INB_DW */
4554 /* generate an interrupt to dongle to indicate that it needs to process cons command */
4555 dhdpcie_send_mb_data(bus
, H2D_HOST_CONS_INT
);
4558 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
4559 #endif /* PCIE_INB_DW */
4562 } /* dhd_bus_console_in */
4565 * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
4566 * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
4569 BCMFASTPATH(dhd_bus_rx_frame
)(struct dhd_bus
*bus
, void* pkt
, int ifidx
, uint pkt_count
)
4571 dhd_rx_frame(bus
->dhd
, ifidx
, pkt
, pkt_count
, 0);
/* Acquire/Release bar1_switch_lock only if the chip supports bar1 switching;
 * otherwise just reference 'flags' to avoid unused-variable warnings.
 */
#define DHD_BUS_BAR1_SWITCH_LOCK(bus, flags) \
	((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_LOCK((bus)->bar1_switch_lock, flags) : \
		BCM_REFERENCE(flags)

#define DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags) \
	((bus)->bar1_switch_enab) ? DHD_BAR1_SWITCH_UNLOCK((bus)->bar1_switch_lock, flags) : \
		BCM_REFERENCE(flags)
4583 /* Init/Deinit bar1_switch_lock only if the chip supports bar1 switching */
4585 dhd_init_bar1_switch_lock(dhd_bus_t
*bus
)
4587 if (bus
->bar1_switch_enab
&& !bus
->bar1_switch_lock
) {
4588 bus
->bar1_switch_lock
= osl_spin_lock_init(bus
->osh
);
4593 dhd_deinit_bar1_switch_lock(dhd_bus_t
*bus
)
4595 if (bus
->bar1_switch_enab
&& bus
->bar1_switch_lock
) {
4596 osl_spin_lock_deinit(bus
->osh
, bus
->bar1_switch_lock
);
4597 bus
->bar1_switch_lock
= NULL
;
/*
 * The bpwindow for any address will be lower bound of multiples of bar1_size.
 * For eg, if addr=0x938fff and bar1_size is 0x400000, then
 * address will fall in the window of 0x800000-0xbfffff, so need
 * to select bpwindow as 0x800000.
 * To achieve this mask the LSB nibbles of bar1_size of the given addr.
 */
#define DHD_BUS_BAR1_BPWIN(addr, bar1_size) \
	(uint32)((addr) & ~((bar1_size) - 1))
4612 * dhdpcie_bar1_window_switch_enab
4614 * Check if the chip requires BAR1 window switching based on
4615 * dongle_ram_base, ramsize and mapped bar1_size and sets
4616 * bus->bar1_switch_enab accordingly
4617 * @bus: dhd bus context
4621 dhdpcie_bar1_window_switch_enab(dhd_bus_t
*bus
)
4623 uint32 ramstart
= bus
->dongle_ram_base
;
4624 uint32 ramend
= bus
->dongle_ram_base
+ bus
->ramsize
- 1;
4625 uint32 bpwinstart
= DHD_BUS_BAR1_BPWIN(ramstart
, bus
->bar1_size
);
4626 uint32 bpwinend
= DHD_BUS_BAR1_BPWIN(ramend
, bus
->bar1_size
);
4628 bus
->bar1_switch_enab
= FALSE
;
4631 * Window switch is needed to access complete BAR1
4632 * if bpwinstart and bpwinend are different
4634 if (bpwinstart
!= bpwinend
) {
4635 bus
->bar1_switch_enab
= TRUE
;
4638 DHD_ERROR(("%s: bar1_switch_enab=%d ramstart=0x%x ramend=0x%x bar1_size=0x%x\n",
4639 __FUNCTION__
, bus
->bar1_switch_enab
, ramstart
, ramend
, bus
->bar1_size
));
4643 * dhdpcie_setbar1win
4645 * os independendent function for setting bar1 window in order to allow
4646 * also set current window positon.
4648 * @bus: dhd bus context
4649 * @addr: new backplane windows address for BAR1
4652 dhdpcie_setbar1win(dhd_bus_t
*bus
, uint32 addr
)
4654 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCI_BAR1_WIN
, 4, addr
);
4655 bus
->curr_bar1_win
= addr
;
4659 * dhdpcie_bus_chkandshift_bpoffset
4661 * Check the provided address is within the current BAR1 window,
4662 * if not, shift the window
4664 * @bus: dhd bus context
4665 * @offset: back plane address that the caller wants to access
4667 * Return: new offset for access
4670 dhdpcie_bus_chkandshift_bpoffset(dhd_bus_t
*bus
, ulong offset
)
4675 if (!bus
->bar1_switch_enab
) {
4679 /* Determine BAR1 backplane window using window size
4680 * Window address mask should be ~(size - 1)
4682 bpwin
= DHD_BUS_BAR1_BPWIN(offset
, bus
->bar1_size
);
4684 if (bpwin
!= bus
->curr_bar1_win
) {
4685 DHD_INFO(("%s: move BAR1 window curr_bar1_win=0x%x bpwin=0x%x offset=0x%lx\n",
4686 __FUNCTION__
, bus
->curr_bar1_win
, bpwin
, offset
));
4687 /* Move BAR1 window */
4688 dhdpcie_setbar1win(bus
, bpwin
);
4691 return offset
- bpwin
;
4694 /** 'offset' is a backplane address */
4696 dhdpcie_bus_wtcm8(dhd_bus_t
*bus
, ulong offset
, uint8 data
)
4700 if (bus
->is_linkdown
) {
4701 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
4705 DHD_BUS_BAR1_SWITCH_LOCK(bus
, flags
);
4707 offset
= dhdpcie_bus_chkandshift_bpoffset(bus
, offset
);
4709 W_REG(bus
->dhd
->osh
, (volatile uint8
*)(bus
->tcm
+ offset
), data
);
4711 DHD_BUS_BAR1_SWITCH_UNLOCK(bus
, flags
);
4715 dhdpcie_bus_wtcm16(dhd_bus_t
*bus
, ulong offset
, uint16 data
)
4719 if (bus
->is_linkdown
) {
4720 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
4724 DHD_BUS_BAR1_SWITCH_LOCK(bus
, flags
);
4726 offset
= dhdpcie_bus_chkandshift_bpoffset(bus
, offset
);
4728 W_REG(bus
->dhd
->osh
, (volatile uint16
*)(bus
->tcm
+ offset
), data
);
4730 DHD_BUS_BAR1_SWITCH_UNLOCK(bus
, flags
);
4734 dhdpcie_bus_wtcm32(dhd_bus_t
*bus
, ulong offset
, uint32 data
)
4738 if (bus
->is_linkdown
) {
4739 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
4743 DHD_BUS_BAR1_SWITCH_LOCK(bus
, flags
);
4745 offset
= dhdpcie_bus_chkandshift_bpoffset(bus
, offset
);
4747 W_REG(bus
->dhd
->osh
, (volatile uint32
*)(bus
->tcm
+ offset
), data
);
4749 DHD_BUS_BAR1_SWITCH_UNLOCK(bus
, flags
);
#ifdef DHD_SUPPORT_64BIT
/* Write a 64-bit value to dongle TCM at backplane address 'offset'.
 * Silently drops the write if the PCIe link is down.
 */
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

	W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);
}
#endif /* DHD_SUPPORT_64BIT */
4774 dhdpcie_bus_rtcm8(dhd_bus_t
*bus
, ulong offset
)
4776 volatile uint8 data
;
4779 if (bus
->is_linkdown
) {
4780 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
4785 DHD_BUS_BAR1_SWITCH_LOCK(bus
, flags
);
4787 offset
= dhdpcie_bus_chkandshift_bpoffset(bus
, offset
);
4789 data
= R_REG(bus
->dhd
->osh
, (volatile uint8
*)(bus
->tcm
+ offset
));
4791 DHD_BUS_BAR1_SWITCH_UNLOCK(bus
, flags
);
4796 dhdpcie_bus_rtcm16(dhd_bus_t
*bus
, ulong offset
)
4798 volatile uint16 data
;
4801 if (bus
->is_linkdown
) {
4802 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
4807 DHD_BUS_BAR1_SWITCH_LOCK(bus
, flags
);
4809 offset
= dhdpcie_bus_chkandshift_bpoffset(bus
, offset
);
4811 data
= R_REG(bus
->dhd
->osh
, (volatile uint16
*)(bus
->tcm
+ offset
));
4813 DHD_BUS_BAR1_SWITCH_UNLOCK(bus
, flags
);
4818 dhdpcie_bus_rtcm32(dhd_bus_t
*bus
, ulong offset
)
4820 volatile uint32 data
;
4823 if (bus
->is_linkdown
) {
4824 DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__
));
4829 DHD_BUS_BAR1_SWITCH_LOCK(bus
, flags
);
4831 offset
= dhdpcie_bus_chkandshift_bpoffset(bus
, offset
);
4833 data
= R_REG(bus
->dhd
->osh
, (volatile uint32
*)(bus
->tcm
+ offset
));
4835 DHD_BUS_BAR1_SWITCH_UNLOCK(bus
, flags
);
#ifdef DHD_SUPPORT_64BIT
/* Read a 64-bit value from dongle TCM at backplane address 'offset'.
 * Returns all-ones ((uint64)-1) when the PCIe link is down.
 */
uint64
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
	volatile uint64 data;
	ulong flags = 0;

	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint64)-1;
		return data;
	}

	DHD_BUS_BAR1_SWITCH_LOCK(bus, flags);

	offset = dhdpcie_bus_chkandshift_bpoffset(bus, offset);

	data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));

	DHD_BUS_BAR1_SWITCH_UNLOCK(bus, flags);

	return data;
}
#endif /* DHD_SUPPORT_64BIT */
4863 /** A snippet of dongle memory is shared between host and dongle */
4865 dhd_bus_cmn_writeshared(dhd_bus_t
*bus
, void *data
, uint32 len
, uint8 type
, uint16 ringid
)
4868 ulong addr
; /* dongle address */
4870 DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__
, type
, len
));
4872 if (bus
->is_linkdown
) {
4873 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
4877 if (MULTIBP_ENAB(bus
->sih
)) {
4878 dhd_bus_pcie_pwr_req(bus
);
4881 case D2H_DMA_SCRATCH_BUF
:
4882 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_dma_scratch_buffer
);
4883 long_data
= HTOL64(*(uint64
*)data
);
4884 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4885 if (dhd_msg_level
& DHD_INFO_VAL
) {
4886 prhex(__FUNCTION__
, data
, len
);
4890 case D2H_DMA_SCRATCH_BUF_LEN
:
4891 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_dma_scratch_buffer_len
);
4892 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4893 if (dhd_msg_level
& DHD_INFO_VAL
) {
4894 prhex(__FUNCTION__
, data
, len
);
4898 case H2D_DMA_INDX_WR_BUF
:
4899 long_data
= HTOL64(*(uint64
*)data
);
4900 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, h2d_w_idx_hostaddr
);
4901 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4902 if (dhd_msg_level
& DHD_INFO_VAL
) {
4903 prhex(__FUNCTION__
, data
, len
);
4907 case H2D_DMA_INDX_RD_BUF
:
4908 long_data
= HTOL64(*(uint64
*)data
);
4909 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, h2d_r_idx_hostaddr
);
4910 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4911 if (dhd_msg_level
& DHD_INFO_VAL
) {
4912 prhex(__FUNCTION__
, data
, len
);
4916 case D2H_DMA_INDX_WR_BUF
:
4917 long_data
= HTOL64(*(uint64
*)data
);
4918 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, d2h_w_idx_hostaddr
);
4919 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4920 if (dhd_msg_level
& DHD_INFO_VAL
) {
4921 prhex(__FUNCTION__
, data
, len
);
4925 case D2H_DMA_INDX_RD_BUF
:
4926 long_data
= HTOL64(*(uint64
*)data
);
4927 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, d2h_r_idx_hostaddr
);
4928 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4929 if (dhd_msg_level
& DHD_INFO_VAL
) {
4930 prhex(__FUNCTION__
, data
, len
);
4934 case H2D_IFRM_INDX_WR_BUF
:
4935 long_data
= HTOL64(*(uint64
*)data
);
4936 addr
= DHD_RING_INFO_MEMBER_ADDR(bus
, ifrm_w_idx_hostaddr
);
4937 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4938 if (dhd_msg_level
& DHD_INFO_VAL
) {
4939 prhex(__FUNCTION__
, data
, len
);
4943 case RING_ITEM_LEN
:
4944 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, len_items
);
4945 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4948 case RING_MAX_ITEMS
:
4949 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, max_item
);
4950 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4953 case RING_BUF_ADDR
:
4954 long_data
= HTOL64(*(uint64
*)data
);
4955 addr
= DHD_RING_MEM_MEMBER_ADDR(bus
, ringid
, base_addr
);
4956 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4957 if (dhd_msg_level
& DHD_INFO_VAL
) {
4958 prhex(__FUNCTION__
, data
, len
);
4963 addr
= bus
->ring_sh
[ringid
].ring_state_w
;
4964 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4968 addr
= bus
->ring_sh
[ringid
].ring_state_r
;
4969 dhdpcie_bus_wtcm16(bus
, addr
, (uint16
) HTOL16(*(uint16
*)data
));
4973 addr
= bus
->d2h_mb_data_ptr_addr
;
4974 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4978 addr
= bus
->h2d_mb_data_ptr_addr
;
4979 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4982 case HOST_API_VERSION
:
4983 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_cap
);
4984 dhdpcie_bus_wtcm32(bus
, addr
, (uint32
) HTOL32(*(uint32
*)data
));
4987 case DNGL_TO_HOST_TRAP_ADDR
:
4988 long_data
= HTOL64(*(uint64
*)data
);
4989 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_trap_addr
);
4990 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*) &long_data
, len
);
4991 DHD_INFO(("Wrote trap addr:0x%x\n", (uint32
) HTOL32(*(uint32
*)data
)));
4995 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_scb_addr
);
4996 #ifdef DHD_SUPPORT_64BIT
4997 dhdpcie_bus_wtcm64(bus
, addr
, (uint64
) HTOL64(*(uint64
*)data
));
4998 #else /* !DHD_SUPPORT_64BIT */
4999 dhdpcie_bus_wtcm32(bus
, addr
, *((uint32
*)data
));
5000 #endif /* DHD_SUPPORT_64BIT */
5001 DHD_INFO(("Wrote host_scb_addr:0x%x\n",
5002 (uint32
) HTOL32(*(uint32
*)data
)));
5008 if (MULTIBP_ENAB(bus
->sih
)) {
5009 dhd_bus_pcie_pwr_req_clear(bus
);
5011 } /* dhd_bus_cmn_writeshared */
5013 /** A snippet of dongle memory is shared between host and dongle */
5015 dhd_bus_cmn_readshared(dhd_bus_t
*bus
, void* data
, uint8 type
, uint16 ringid
)
5017 ulong addr
; /* dongle address */
5019 if (MULTIBP_ENAB(bus
->sih
)) {
5020 dhd_bus_pcie_pwr_req(bus
);
5024 addr
= bus
->ring_sh
[ringid
].ring_state_w
;
5025 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
5029 addr
= bus
->ring_sh
[ringid
].ring_state_r
;
5030 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
5033 case TOTAL_LFRAG_PACKET_CNT
:
5034 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, total_lfrag_pkt_cnt
);
5035 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
5039 addr
= bus
->h2d_mb_data_ptr_addr
;
5040 *(uint32
*)data
= LTOH32(dhdpcie_bus_rtcm32(bus
, addr
));
5044 addr
= bus
->d2h_mb_data_ptr_addr
;
5045 *(uint32
*)data
= LTOH32(dhdpcie_bus_rtcm32(bus
, addr
));
5048 case MAX_HOST_RXBUFS
:
5049 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, max_host_rxbufs
);
5050 *(uint16
*)data
= LTOH16(dhdpcie_bus_rtcm16(bus
, addr
));
5054 addr
= DHD_PCIE_SHARED_MEMBER_ADDR(bus
, host_scb_size
);
5055 *(uint32
*)data
= LTOH32(dhdpcie_bus_rtcm32(bus
, addr
));
5061 if (MULTIBP_ENAB(bus
->sih
)) {
5062 dhd_bus_pcie_pwr_req_clear(bus
);
5066 uint32
dhd_bus_get_sharedflags(dhd_bus_t
*bus
)
5068 return ((pciedev_shared_t
*)bus
->pcie_sh
)->flags
;
5072 dhd_bus_clearcounts(dhd_pub_t
*dhdp
)
5077 * @param params input buffer, NULL for 'set' operation.
5078 * @param plen length of 'params' buffer, 0 for 'set' operation.
5079 * @param arg output buffer
5082 dhd_bus_iovar_op(dhd_pub_t
*dhdp
, const char *name
,
5083 void *params
, uint plen
, void *arg
, uint len
, bool set
)
5085 dhd_bus_t
*bus
= dhdp
->bus
;
5086 const bcm_iovar_t
*vi
= NULL
;
5087 int bcmerror
= BCME_UNSUPPORTED
;
5091 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5097 /* Get MUST have return space */
5098 ASSERT(set
|| (arg
&& len
));
5099 if (!(set
|| (arg
&& len
)))
5102 /* Set does NOT take qualifiers */
5103 ASSERT(!set
|| (!params
&& !plen
));
5104 if (!(!set
|| (!params
&& !plen
)))
5107 DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__
,
5108 name
, (set
? "set" : "get"), len
, plen
));
5110 /* Look up var locally; if not found pass to host driver */
5111 if ((vi
= bcm_iovar_lookup(dhdpcie_iovars
, name
)) == NULL
) {
5115 if (MULTIBP_ENAB(bus
->sih
)) {
5116 if (vi
->flags
& DHD_IOVF_PWRREQ_BYPASS
) {
5117 DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__
));
5119 dhd_bus_pcie_pwr_req(bus
);
5123 /* set up 'params' pointer in case this is a set command so that
5124 * the convenience int and bool code can be common to set and get
5126 if (params
== NULL
) {
5131 if (vi
->type
== IOVT_VOID
)
5133 else if (vi
->type
== IOVT_BUFFER
)
5136 /* all other types are integer sized */
5137 val_size
= sizeof(int);
5139 actionid
= set
? IOV_SVAL(vi
->varid
) : IOV_GVAL(vi
->varid
);
5140 bcmerror
= dhdpcie_bus_doiovar(bus
, vi
, actionid
, name
, params
, plen
, arg
, len
, val_size
);
5143 /* In DEVRESET_QUIESCE/DEVRESET_ON,
5144 * this includes dongle re-attach which initialize pwr_req_ref count to 0 and
5145 * causes pwr_req_ref count miss-match in pwr req clear function and hang.
5146 * In this case, bypass pwr req clear.
5148 if (bcmerror
== BCME_DNGL_DEVRESET
) {
5151 if (MULTIBP_ENAB(bus
->sih
)) {
5153 if (vi
->flags
& DHD_IOVF_PWRREQ_BYPASS
) {
5154 DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__
));
5156 dhd_bus_pcie_pwr_req_clear(bus
);
5162 } /* dhd_bus_iovar_op */
5165 #include <bcm_buzzz.h>
5168 dhd_buzzz_dump_cntrs(char *p
, uint32
*core
, uint32
*log
,
5169 const int num_counters
)
5173 uint32 curr
[BCM_BUZZZ_COUNTERS_MAX
], prev
[BCM_BUZZZ_COUNTERS_MAX
];
5174 uint32 delta
[BCM_BUZZZ_COUNTERS_MAX
];
5176 /* Compute elapsed counter values per counter event type */
5177 for (ctr
= 0U; ctr
< num_counters
; ctr
++) {
5178 prev
[ctr
] = core
[ctr
];
5180 core
[ctr
] = curr
[ctr
]; /* saved for next log */
5182 if (curr
[ctr
] < prev
[ctr
])
5183 delta
[ctr
] = curr
[ctr
] + (~0U - prev
[ctr
]);
5185 delta
[ctr
] = (curr
[ctr
] - prev
[ctr
]);
5187 bytes
+= sprintf(p
+ bytes
, "%12u ", delta
[ctr
]);
5193 typedef union cm3_cnts
{ /* export this in bcm_buzzz.h */
5205 dhd_bcm_buzzz_dump_cntrs6(char *p
, uint32
*core
, uint32
*log
)
5209 uint32 cyccnt
, instrcnt
;
5210 cm3_cnts_t cm3_cnts
;
5213 { /* 32bit cyccnt */
5214 uint32 curr
, prev
, delta
;
5215 prev
= core
[0]; curr
= *log
++; core
[0] = curr
;
5217 delta
= curr
+ (~0U - prev
);
5219 delta
= (curr
- prev
);
5221 bytes
+= sprintf(p
+ bytes
, "%12u ", delta
);
5225 { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
5228 cm3_cnts_t curr
, prev
, delta
;
5229 prev
.u32
= core
[1]; curr
.u32
= * log
++; core
[1] = curr
.u32
;
5230 for (i
= 0; i
< 4; i
++) {
5231 if (curr
.u8
[i
] < prev
.u8
[i
])
5232 delta
.u8
[i
] = curr
.u8
[i
] + (max8
- prev
.u8
[i
]);
5234 delta
.u8
[i
] = (curr
.u8
[i
] - prev
.u8
[i
]);
5235 bytes
+= sprintf(p
+ bytes
, "%4u ", delta
.u8
[i
]);
5237 cm3_cnts
.u32
= delta
.u32
;
5240 { /* Extract the foldcnt from arg0 */
5241 uint8 curr
, prev
, delta
, max8
= ~0;
5242 bcm_buzzz_arg0_t arg0
; arg0
.u32
= *log
;
5243 prev
= core
[2]; curr
= arg0
.klog
.cnt
; core
[2] = curr
;
5245 delta
= curr
+ (max8
- prev
);
5247 delta
= (curr
- prev
);
5248 bytes
+= sprintf(p
+ bytes
, "%4u ", delta
);
5252 instrcnt
= cyccnt
- (cm3_cnts
.u8
[0] + cm3_cnts
.u8
[1] + cm3_cnts
.u8
[2]
5253 + cm3_cnts
.u8
[3]) + foldcnt
;
5254 if (instrcnt
> 0xFFFFFF00)
5255 bytes
+= sprintf(p
+ bytes
, "[%10s] ", "~");
5257 bytes
+= sprintf(p
+ bytes
, "[%10u] ", instrcnt
);
5262 dhd_buzzz_dump_log(char *p
, uint32
*core
, uint32
*log
, bcm_buzzz_t
*buzzz
)
5265 bcm_buzzz_arg0_t arg0
;
5266 static uint8
* fmt
[] = BCM_BUZZZ_FMT_STRINGS
;
5268 if (buzzz
->counters
== 6) {
5269 bytes
+= dhd_bcm_buzzz_dump_cntrs6(p
, core
, log
);
5270 log
+= 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
5272 bytes
+= dhd_buzzz_dump_cntrs(p
, core
, log
, buzzz
->counters
);
5273 log
+= buzzz
->counters
; /* (N x 32bit) CR4=3, CA7=4 */
5276 /* Dump the logged arguments using the registered formats */
5279 switch (arg0
.klog
.args
) {
5281 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
]);
5285 uint32 arg1
= *log
++;
5286 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
);
5292 arg1
= *log
++; arg2
= *log
++;
5293 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
);
5298 uint32 arg1
, arg2
, arg3
;
5299 arg1
= *log
++; arg2
= *log
++; arg3
= *log
++;
5300 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
, arg3
);
5305 uint32 arg1
, arg2
, arg3
, arg4
;
5306 arg1
= *log
++; arg2
= *log
++;
5307 arg3
= *log
++; arg4
= *log
++;
5308 bytes
+= sprintf(p
+ bytes
, fmt
[arg0
.klog
.id
], arg1
, arg2
, arg3
, arg4
);
5312 printf("%s: Maximum one argument supported\n", __FUNCTION__
);
5316 bytes
+= sprintf(p
+ bytes
, "\n");
5321 void dhd_buzzz_dump(bcm_buzzz_t
*buzzz_p
, void *buffer_p
, char *p
)
5324 uint32 total
, part1
, part2
, log_sz
, core
[BCM_BUZZZ_COUNTERS_MAX
];
5327 for (i
= 0; i
< BCM_BUZZZ_COUNTERS_MAX
; i
++) {
5331 log_sz
= buzzz_p
->log_sz
;
5333 part1
= ((uint32
)buzzz_p
->cur
- (uint32
)buzzz_p
->log
) / log_sz
;
5335 if (buzzz_p
->wrap
== TRUE
) {
5336 part2
= ((uint32
)buzzz_p
->end
- (uint32
)buzzz_p
->cur
) / log_sz
;
5337 total
= (buzzz_p
->buffer_sz
- BCM_BUZZZ_LOGENTRY_MAXSZ
) / log_sz
;
5340 total
= buzzz_p
->count
;
5344 printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__
, total
);
5347 printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__
,
5348 total
, part2
, part1
);
5351 if (part2
) { /* with wrap */
5352 log
= (void*)((size_t)buffer_p
+ (buzzz_p
->cur
- buzzz_p
->log
));
5353 while (part2
--) { /* from cur to end : part2 */
5355 dhd_buzzz_dump_log(p
, core
, (uint32
*)log
, buzzz_p
);
5357 log
= (void*)((size_t)log
+ buzzz_p
->log_sz
);
5361 log
= (void*)buffer_p
;
5364 dhd_buzzz_dump_log(p
, core
, (uint32
*)log
, buzzz_p
);
5366 log
= (void*)((size_t)log
+ buzzz_p
->log_sz
);
5369 printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__
);
5372 int dhd_buzzz_dump_dngl(dhd_bus_t
*bus
)
5374 bcm_buzzz_t
* buzzz_p
= NULL
;
5375 void * buffer_p
= NULL
;
5376 char * page_p
= NULL
;
5377 pciedev_shared_t
*sh
;
5380 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
5381 return BCME_UNSUPPORTED
;
5383 if ((page_p
= (char *)MALLOC(bus
->dhd
->osh
, 4096)) == NULL
) {
5384 printf("%s: Page memory allocation failure\n", __FUNCTION__
);
5387 if ((buzzz_p
= MALLOC(bus
->dhd
->osh
, sizeof(bcm_buzzz_t
))) == NULL
) {
5388 printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__
);
5392 ret
= dhdpcie_readshared(bus
);
5394 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__
));
5400 DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__
, sh
->buzz_dbg_ptr
));
5402 if (sh
->buzz_dbg_ptr
!= 0U) { /* Fetch and display dongle BUZZZ Trace */
5404 dhdpcie_bus_membytes(bus
, FALSE
, (ulong
)sh
->buzz_dbg_ptr
,
5405 (uint8
*)buzzz_p
, sizeof(bcm_buzzz_t
));
5407 printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
5408 "count<%u> status<%u> wrap<%u>\n"
5409 "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
5410 (int)sh
->buzz_dbg_ptr
,
5411 (int)buzzz_p
->log
, (int)buzzz_p
->cur
, (int)buzzz_p
->end
,
5412 buzzz_p
->count
, buzzz_p
->status
, buzzz_p
->wrap
,
5413 buzzz_p
->cpu_idcode
, buzzz_p
->counters
, buzzz_p
->group
,
5414 buzzz_p
->buffer_sz
, buzzz_p
->log_sz
);
5416 if (buzzz_p
->count
== 0) {
5417 printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__
);
5421 /* Allocate memory for trace buffer and format strings */
5422 buffer_p
= MALLOC(bus
->dhd
->osh
, buzzz_p
->buffer_sz
);
5423 if (buffer_p
== NULL
) {
5424 printf("%s: Buffer memory allocation failure\n", __FUNCTION__
);
5428 /* Fetch the trace. format strings are exported via bcm_buzzz.h */
5429 dhdpcie_bus_membytes(bus
, FALSE
, (uint32
)buzzz_p
->log
, /* Trace */
5430 (uint8
*)buffer_p
, buzzz_p
->buffer_sz
);
5432 /* Process and display the trace using formatted output */
5436 for (ctr
= 0; ctr
< buzzz_p
->counters
; ctr
++) {
5437 printf("<Evt[%02X]> ", buzzz_p
->eventid
[ctr
]);
5439 printf("<code execution point>\n");
5442 dhd_buzzz_dump(buzzz_p
, buffer_p
, page_p
);
5444 printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__
);
5446 MFREE(bus
->dhd
->osh
, buffer_p
, buzzz_p
->buffer_sz
); buffer_p
= NULL
;
5451 if (page_p
) MFREE(bus
->dhd
->osh
, page_p
, 4096);
5452 if (buzzz_p
) MFREE(bus
->dhd
->osh
, buzzz_p
, sizeof(bcm_buzzz_t
));
5453 if (buffer_p
) MFREE(bus
->dhd
->osh
, buffer_p
, buzzz_p
->buffer_sz
);
5457 #endif /* BCM_BUZZZ */
/* True when the chip sits on a PCI bus with a PCIe gen2 core */
#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
	((sih)->buscoretype == PCIE2_CORE_ID))
5463 dhdpcie_enum_reg_init(dhd_bus_t
*bus
)
5465 /* initialize Function control register (clear bit 4) to HW init value */
5466 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5467 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.control
), ~0,
5468 PCIE_CPLCA_ENABLE
| PCIE_DLY_PERST_TO_COE
);
5471 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5472 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.intmask
), ~0, 0);
5473 /* clear IntStatus */
5474 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5475 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.intstatus
), ~0,
5476 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5477 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.intstatus
), 0, 0));
5479 /* clear MSIVector */
5480 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5481 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.msi_vector
), ~0, 0);
5482 /* clear MSIIntMask */
5483 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5484 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.msi_intmask
), ~0, 0);
5485 /* clear MSIIntStatus */
5486 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5487 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.msi_intstatus
), ~0,
5488 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5489 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.msi_intstatus
), 0, 0));
5491 /* clear PowerIntMask */
5492 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5493 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.pwr_intmask
), ~0, 0);
5494 /* clear PowerIntStatus */
5495 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5496 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.pwr_intstatus
), ~0,
5497 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5498 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.pwr_intstatus
), 0, 0));
5500 /* clear MailboxIntMask */
5501 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5502 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.mbox_intmask
), ~0, 0);
5503 /* clear MailboxInt */
5504 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5505 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.mbox_intstatus
), ~0,
5506 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5507 OFFSETOF(sbpcieregs_t
, ftn_ctrl
.mbox_intstatus
), 0, 0));
5511 dhd_bus_perform_flr(dhd_bus_t
*bus
, bool force_fail
)
5517 DHD_ERROR(("******** Perform FLR ********\n"));
5519 /* Kernel Panic for 4378Ax during traptest/devreset4 reload case:
5520 * For 4378Ax, enum registers will not be reset with FLR (producer index WAR).
5521 * So, the MailboxIntMask is left as 0xffff during fw boot-up,
5522 * and the fw trap handling during fw boot causes Kernel Panic.
5523 * Jira: SWWLAN-212578: [4378A0 PCIe DVT] :
5524 * Kernel Panic seen in F0 FLR with BT Idle/Traffic/DMA
5526 if (bus
->sih
&& PCIE_ENUM_RESET_WAR_ENAB(bus
->sih
->buscorerev
)) {
5527 if (bus
->pcie_mailbox_mask
!= 0) {
5528 dhdpcie_bus_intr_disable(bus
);
5530 /* initialize F0 enum registers before FLR for rev66/67 */
5531 dhdpcie_enum_reg_init(bus
);
5534 /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
5535 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CAPABILITY
, sizeof(val
));
5536 flr_capab
= val
& (1 << PCIE_FLR_CAPAB_BIT
);
5537 DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
5538 PCIE_CFG_DEVICE_CAPABILITY
, val
, flr_capab
));
5540 DHD_ERROR(("Chip does not support FLR\n"));
5541 return BCME_UNSUPPORTED
;
5544 /* Save pcie config space */
5545 DHD_INFO(("Save Pcie Config Space\n"));
5546 DHD_PCIE_CONFIG_SAVE(bus
);
5548 /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
5549 DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5550 PCIE_FUNCTION_LEVEL_RESET_BIT
, PCIE_CFG_DEVICE_CONTROL
));
5551 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
));
5552 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
5553 val
= val
| (1 << PCIE_FUNCTION_LEVEL_RESET_BIT
);
5554 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
5555 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
), val
);
5557 /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
5558 DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY
));
5560 CAN_SLEEP() ? OSL_SLEEP(DHD_FUNCTION_LEVEL_RESET_DELAY
) :
5561 OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY
* USEC_PER_MSEC
);
5564 DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5565 PCIE_SSRESET_DISABLE_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
5566 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
5567 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
5569 val
= val
| (1 << PCIE_SSRESET_DISABLE_BIT
);
5570 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
5572 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
), val
);
5574 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
5575 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
,
5579 /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
5580 DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5581 PCIE_FUNCTION_LEVEL_RESET_BIT
, PCIE_CFG_DEVICE_CONTROL
));
5582 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
));
5583 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
5584 val
= val
& ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT
);
5585 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL
, val
));
5586 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_DEVICE_CONTROL
, sizeof(val
), val
);
5588 /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5589 DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
5590 "is cleared\n", PCIE_SSRESET_STATUS_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
5592 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
5593 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5594 PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
5595 val
= val
& (1 << PCIE_SSRESET_STATUS_BIT
);
5596 OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY
);
5597 } while (val
&& (retry
++ < DHD_SSRESET_STATUS_RETRIES
));
5600 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5601 PCIE_CFG_SUBSYSTEM_CONTROL
, PCIE_SSRESET_STATUS_BIT
));
5602 /* User has to fire the IOVAR again, if force_fail is needed */
5604 bus
->flr_force_fail
= FALSE
;
5605 DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__
));
5607 return BCME_DONGLE_DOWN
;
5610 /* Restore pcie config space */
5611 DHD_INFO(("Restore Pcie Config Space\n"));
5612 DHD_PCIE_CONFIG_RESTORE(bus
);
5614 DHD_ERROR(("******** FLR Succedeed ********\n"));
/* Backplane-reset timing and bit positions used by dhd_bus_cfg_ss_ctrl_bp_reset() */
#define DHD_BP_RESET_ASPM_DISABLE_DELAY		500u	/* usec */

#define DHD_BP_RESET_STATUS_RETRY_DELAY		40u	/* usec */
#define DHD_BP_RESET_STATUS_RETRIES		50u

#define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT		10
#define PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT	12
5628 dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus
*bus
)
5633 bool reset_stat_bit
;
5635 DHD_ERROR(("******** Perform BP reset ********\n"));
5638 DHD_ERROR(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5639 PCIECFGREG_LINK_STATUS_CTRL
));
5640 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
));
5641 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5642 val
= val
& (~PCIE_ASPM_ENAB
);
5643 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5644 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
), val
);
5646 /* wait for delay usec */
5647 DHD_ERROR(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY
));
5648 OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY
);
5650 /* Set bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
5651 DHD_ERROR(("Set PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
5652 " of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5653 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
5654 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
5655 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
5656 val
= val
| (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT
);
5657 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
5658 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
), val
);
5660 /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is set */
5661 DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
5662 "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is set\n",
5663 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
5665 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
5666 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5667 PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
5668 reset_stat_bit
= val
& (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT
);
5669 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY
);
5670 } while (!reset_stat_bit
&& (retry
++ < DHD_BP_RESET_STATUS_RETRIES
));
5672 if (!reset_stat_bit
) {
5673 DHD_ERROR(("ERROR: reg=0x%x bit %d is not set\n",
5674 PCIE_CFG_SUBSYSTEM_CONTROL
,
5675 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT
));
5680 /* Clear bp reset bit 10 of PCIE_CFG_SUBSYSTEM_CONTROL */
5681 DHD_ERROR(("Clear PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT(%d)"
5682 " of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5683 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
5684 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
5685 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
5686 val
= val
& ~(1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_BIT
);
5687 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
5688 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
), val
);
5690 /* Wait till bp reset status bit 12 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5691 DHD_ERROR(("Wait till PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT(%d) of "
5692 "PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) is cleared\n",
5693 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT
, PCIE_CFG_SUBSYSTEM_CONTROL
));
5695 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIE_CFG_SUBSYSTEM_CONTROL
, sizeof(val
));
5696 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5697 PCIE_CFG_SUBSYSTEM_CONTROL
, val
));
5698 reset_stat_bit
= val
& (1 << PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT
);
5699 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY
);
5700 } while (reset_stat_bit
&& (retry
++ < DHD_BP_RESET_STATUS_RETRIES
));
5702 if (reset_stat_bit
) {
5703 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5704 PCIE_CFG_SUBSYSTEM_CONTROL
,
5705 PCIE_CFG_SUBSYSTEM_CONTROL_BP_RESET_STATUS_BIT
));
5711 DHD_ERROR(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5712 PCIECFGREG_LINK_STATUS_CTRL
));
5713 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
));
5714 DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5715 val
= val
| (PCIE_ASPM_L1_ENAB
);
5716 DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5717 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
), val
);
5720 DHD_ERROR(("******** BP reset Failed ********\n"));
5722 DHD_ERROR(("******** BP reset Succedeed ********\n"));
/* Bit positions used by dhd_bus_cfg_sprom_ctrl_bp_reset() below:
 * the SB-reset request bit in PCIECFGREG_SPROM_CTRL, and the backplane-reset
 * status bit polled in the DAR clock control/status register.
 */
5728 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
5729 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
/* dhd_bus_cfg_sprom_ctrl_bp_reset(): backplane reset driven through PCIe
 * config space. Sequence visible below: disable ASPM in
 * PCIECFGREG_LINK_STATUS_CTRL, set SB-reset bit 10 of PCIECFGREG_SPROM_CTRL,
 * poll for that bit to clear, then poll bit 21 of the DAR clock ctrl/status
 * register to clear, and finally re-enable ASPM L1.
 * NOTE(review): this text is an extraction with interior lines missing
 * (per the embedded original numbering: the return type/opening brace, the
 * declarations of 'val', 'cond' and 'retry', the 'do {' loop openers and the
 * error-path lines) -- it is not compilable as shown; verify against the
 * complete file before editing logic.
 */
5732 dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus
*bus
)
5736 uint dar_clk_ctrl_status_reg
= DAR_CLK_CTRL(bus
->sih
->buscorerev
);
5740 DHD_ERROR(("******** Perform BP reset ********\n"));
5743 DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5744 PCIECFGREG_LINK_STATUS_CTRL
));
5745 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
));
5746 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5747 val
= val
& (~PCIE_ASPM_ENAB
);
5748 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5749 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
), val
);
5751 /* wait for delay usec */
5752 DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY
));
5753 OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY
);
5755 /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
5756 DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5757 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT
, PCIECFGREG_SPROM_CTRL
));
5758 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIECFGREG_SPROM_CTRL
, sizeof(val
));
5759 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL
, val
));
5760 val
= val
| (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT
);
5761 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL
, val
));
5762 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIECFGREG_SPROM_CTRL
, sizeof(val
), val
);
5764 /* Wait till bit backplane reset is ASSERTED i,e
5765 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
5766 * Only after this, poll for 21st bit of DAR reg 0xAE0 is valid
5767 * else DAR register will read previous old value
5769 DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5770 "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
5771 PCIE_CFG_SPROM_CTRL_SB_RESET_BIT
, PCIECFGREG_SPROM_CTRL
));
/* bounded poll: loop body re-reads SPROM_CTRL until the reset bit clears
 * or DHD_BP_RESET_STATUS_RETRIES is exhausted (the 'do {' opener is one of
 * the lines missing from this extraction)
 */
5773 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIECFGREG_SPROM_CTRL
, sizeof(val
));
5774 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL
, val
));
5775 cond
= val
& (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT
);
5776 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY
);
5777 } while (cond
&& (retry
++ < DHD_BP_RESET_STATUS_RETRIES
));
5780 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5781 PCIECFGREG_SPROM_CTRL
, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT
));
5786 /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
5787 DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
5788 "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
5789 PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT
, dar_clk_ctrl_status_reg
));
/* second bounded poll, this time via si_corereg() on the DAR register */
5791 val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
5792 dar_clk_ctrl_status_reg
, 0, 0);
5793 DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
5794 dar_clk_ctrl_status_reg
, val
));
5795 cond
= val
& (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT
);
5796 OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY
);
5797 } while (cond
&& (retry
++ < DHD_BP_RESET_STATUS_RETRIES
));
5800 DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5801 dar_clk_ctrl_status_reg
, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT
));
5807 DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5808 PCIECFGREG_LINK_STATUS_CTRL
));
5809 val
= OSL_PCI_READ_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
));
5810 DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5811 val
= val
| (PCIE_ASPM_L1_ENAB
);
5812 DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL
, val
));
5813 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIECFGREG_LINK_STATUS_CTRL
, sizeof(val
), val
);
5815 DHD_ERROR(("******** BP reset Succedeed ********\n"));
/* dhd_bus_devreset(): WLAN power toggle for the PCIe bus.
 * flag==TRUE tears the bus down: advertise cleanup, stop the bus, free
 * IRQ/locks/resources, disable the PCIe device, dhd_prot_reset(), stop the
 * host device, mark busstate DHD_BUS_DOWN and set dongle_reset=TRUE.
 * flag==FALSE powers up: start host dev, enable device, alloc resources,
 * dongle attach (with CTO check), request IRQ, dhd_bus_start(), and restore
 * the watchdog timer that dhdpcie_advertise_bus_cleanup disabled.
 * NOTE(review): this extraction is missing interior lines (per the embedded
 * numbering: 'int bcmerror' style declarations, braces, the 'if (bcmerror)'
 * conditions guarding the DHD_ERROR paths, goto/return lines) -- it is not
 * compilable as shown; verify against the complete file before editing logic.
 */
5821 dhd_bus_devreset(dhd_pub_t
*dhdp
, uint8 flag
)
5823 dhd_bus_t
*bus
= dhdp
->bus
;
5825 unsigned long flags
;
5826 int retry
= POWERUP_MAX_RETRY
;
5828 if (flag
== TRUE
) { /* Turn off WLAN */
5829 /* Removing Power */
5830 DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__
));
5832 /* wait for other contexts to finish -- if required a call
5833 * to OSL_DELAY for 1s can be added to give other contexts
5834 * a chance to finish
5836 dhdpcie_advertise_bus_cleanup(bus
->dhd
);
5838 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
5839 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5840 atomic_set(&bus
->dhd
->block_bus
, TRUE
);
5841 dhd_flush_rx_tx_wq(bus
->dhd
);
5842 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5844 #ifdef BCMPCIE_OOB_HOST_WAKE
5845 /* Clean up any pending host wake IRQ */
5846 dhd_bus_oob_intr_set(bus
->dhd
, FALSE
);
5847 dhd_bus_oob_intr_unregister(bus
->dhd
);
5848 #endif /* BCMPCIE_OOB_HOST_WAKE */
5849 dhd_os_wd_timer(dhdp
, 0);
5850 dhd_bus_stop(bus
, TRUE
);
5852 dhdpcie_bus_intr_disable(bus
);
5853 dhdpcie_free_irq(bus
);
5855 dhd_deinit_bus_lp_state_lock(bus
);
5856 dhd_deinit_bar1_switch_lock(bus
);
5857 dhd_deinit_backplane_access_lock(bus
);
5858 dhd_deinit_pwr_req_lock(bus
);
5859 dhd_bus_release_dongle(bus
);
5860 dhdpcie_bus_free_resource(bus
);
5861 bcmerror
= dhdpcie_bus_disable_device(bus
);
5863 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5864 __FUNCTION__
, bcmerror
));
5865 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5866 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
5867 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5869 /* Clean up protocol data after Bus Master Enable bit clear
5870 * so that host can safely unmap DMA and remove the allocated buffers
5871 * from the PKTID MAP. Some Applicantion Processors supported
5872 * System MMU triggers Kernel panic when they detect to attempt to
5873 * DMA-unmapped memory access from the devices which use the
5874 * System MMU. Therefore, Kernel panic can be happened since it is
5875 * possible that dongle can access to DMA-unmapped memory after
5876 * calling the dhd_prot_reset().
5877 * For this reason, the dhd_prot_reset() and dhd_clear() functions
5878 * should be located after the dhdpcie_bus_disable_device().
5880 dhd_prot_reset(dhdp
);
5881 /* XXX Reset dhd_pub_t instance to initial status
5882 * for built-in type driver
5886 bcmerror
= dhdpcie_bus_stop_host_dev(bus
);
5888 DHD_ERROR(("%s: dhdpcie_bus_stop host_dev failed: %d\n",
5889 __FUNCTION__
, bcmerror
));
5890 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5891 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
5892 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5896 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
5897 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__
));
5898 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
5899 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
5900 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5901 atomic_set(&bus
->dhd
->block_bus
, FALSE
);
5902 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5905 dhdpcie_free_irq(bus
);
5907 #ifdef BCMPCIE_OOB_HOST_WAKE
5908 /* Clean up any pending host wake IRQ */
5909 dhd_bus_oob_intr_set(bus
->dhd
, FALSE
);
5910 dhd_bus_oob_intr_unregister(bus
->dhd
);
5911 #endif /* BCMPCIE_OOB_HOST_WAKE */
5912 dhd_dpc_kill(bus
->dhd
);
5913 if (!bus
->no_bus_init
) {
5914 dhd_bus_release_dongle(bus
);
5915 dhdpcie_bus_free_resource(bus
);
5916 bcmerror
= dhdpcie_bus_disable_device(bus
);
5918 DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5919 __FUNCTION__
, bcmerror
));
5922 /* Clean up protocol data after Bus Master Enable bit clear
5923 * so that host can safely unmap DMA and remove the allocated
5924 * buffers from the PKTID MAP. Some Applicantion Processors
5925 * supported System MMU triggers Kernel panic when they detect
5926 * to attempt to DMA-unmapped memory access from the devices
5927 * which use the System MMU.
5928 * Therefore, Kernel panic can be happened since it is possible
5929 * that dongle can access to DMA-unmapped memory after calling
5930 * the dhd_prot_reset().
5931 * For this reason, the dhd_prot_reset() and dhd_clear() functions
5932 * should be located after the dhdpcie_bus_disable_device().
5934 dhd_prot_reset(dhdp
);
5935 /* XXX Reset dhd_pub_t instance to initial status
5936 * for built-in type driver
5940 bus
->no_bus_init
= FALSE
;
5943 bcmerror
= dhdpcie_bus_stop_host_dev(bus
);
5945 DHD_ERROR(("%s: dhdpcie_bus_stop_host_dev failed: %d\n",
5946 __FUNCTION__
, bcmerror
));
5951 bus
->dhd
->dongle_reset
= TRUE
;
5952 DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__
));
5954 } else { /* Turn on WLAN */
5955 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
5957 DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__
));
5958 /* PCIe RC Turn on */
5960 bcmerror
= dhdpcie_bus_start_host_dev(bus
);
5962 DHD_ERROR(("%s: dhdpcie_bus_start_host_dev OK\n",
5971 DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
5972 __FUNCTION__
, bcmerror
));
5975 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
5976 dhd_bus_aspm_enable_rc_ep(bus
, FALSE
);
5977 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
5978 bus
->is_linkdown
= 0;
5979 bus
->cto_triggered
= 0;
5980 #ifdef SUPPORT_LINKDOWN_RECOVERY
5981 bus
->read_shm_fail
= FALSE
;
5982 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5983 bcmerror
= dhdpcie_bus_enable_device(bus
);
5985 DHD_ERROR(("%s: host configuration restore failed: %d\n",
5986 __FUNCTION__
, bcmerror
));
5990 bcmerror
= dhdpcie_bus_alloc_resource(bus
);
5992 DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
5993 __FUNCTION__
, bcmerror
));
5997 #ifdef FORCE_DONGLE_RESET_IN_DEVRESET_ON
5999 * This will be enabled from phone platforms to
6000 * reset dongle during Wifi ON
6002 dhdpcie_dongle_reset(bus
);
6003 #endif /* FORCE_DONGLE_RESET_IN_DEVRESET_ON */
6005 bcmerror
= dhdpcie_bus_dongle_attach(bus
);
6008 * As request irq is done later, till then CTO will not be detected,
6009 * so unconditionally dump cfg and DAR registers.
6011 dhd_bus_dump_imp_cfg_registers(bus
);
6012 dhd_bus_dump_dar_registers(bus
);
6013 /* Check if CTO has happened */
6014 if (PCIECTO_ENAB(bus
)) {
6015 /* read pci_intstatus */
6016 uint32 pci_intstatus
=
6017 dhdpcie_bus_cfg_read_dword(bus
, PCI_INT_STATUS
, 4);
6018 if (pci_intstatus
== (uint32
)-1) {
6019 DHD_ERROR(("%s : Invalid pci_intstatus(0x%x)\n",
6020 __FUNCTION__
, pci_intstatus
));
6021 } else if (pci_intstatus
& PCI_CTO_INT_MASK
) {
6022 DHD_ERROR(("%s: ##### CTO REPORTED BY DONGLE "
6023 "intstat=0x%x enab=%d\n", __FUNCTION__
,
6024 pci_intstatus
, bus
->cto_enable
));
6027 DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
6028 __FUNCTION__
, bcmerror
));
6032 bcmerror
= dhd_bus_request_irq(bus
);
6034 DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
6035 __FUNCTION__
, bcmerror
));
6039 bus
->dhd
->dongle_reset
= FALSE
;
6041 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
6042 dhd_irq_set_affinity(bus
->dhd
, cpumask_of(1));
6043 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
6045 bcmerror
= dhd_bus_start(dhdp
);
6047 DHD_ERROR(("%s: dhd_bus_start: %d\n",
6048 __FUNCTION__
, bcmerror
));
6049 #ifdef DEBUG_DNGL_INIT_FAIL
6050 #ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
6051 bus
->dhd
->memdump_enabled
= DUMP_MEMFILE
;
6053 /* Force panic as HAL will not be inited yet */
6054 bus
->dhd
->memdump_enabled
= DUMP_MEMONLY
;
6055 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
6056 if (bus
->dhd
->memdump_enabled
) {
6057 bus
->dhd
->memdump_type
= DUMP_TYPE_DONGLE_INIT_FAILURE
;
6058 dhdpcie_mem_dump(bus
);
6060 #endif /* DEBUG_DNGL_INIT_FAIL */
6064 /* Renabling watchdog which is disabled in dhdpcie_advertise_bus_cleanup */
6065 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
6066 DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
6068 dhd_os_wd_timer(bus
->dhd
, bus
->dhd
->dhd_watchdog_ms_backup
);
6070 DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__
));
6072 DHD_ERROR(("%s: what should we do here\n", __FUNCTION__
));
6079 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
6080 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__
));
6081 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
6082 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
6088 dhdpcie_get_dma_ring_indices(dhd_pub_t
*dhd
)
6090 int h2d_support
, d2h_support
;
6092 d2h_support
= dhd
->dma_d2h_ring_upd_support
? 1 : 0;
6093 h2d_support
= dhd
->dma_h2d_ring_upd_support
? 1 : 0;
6094 return (d2h_support
| (h2d_support
<< 1));
6098 dhdpcie_set_dma_ring_indices(dhd_pub_t
*dhd
, int32 int_val
)
6101 /* Can change it only during initialization/FW download */
6102 if (dhd
->busstate
== DHD_BUS_DOWN
) {
6103 if ((int_val
> 3) || (int_val
< 0)) {
6104 DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
6105 bcmerror
= BCME_BADARG
;
6107 dhd
->dma_d2h_ring_upd_support
= (int_val
& 1) ? TRUE
: FALSE
;
6108 dhd
->dma_h2d_ring_upd_support
= (int_val
& 2) ? TRUE
: FALSE
;
6109 dhd
->dma_ring_upd_overwrite
= TRUE
;
6112 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6114 bcmerror
= BCME_NOTDOWN
;
6121 /* si_backplane_access() manages a shared resource - BAR0 mapping, hence its
6122 * calls shall be serialized. This wrapper function provides such serialization
6123 * and shall be used everywhere instead of a direct call of si_backplane_access()
6125 * Linux DHD driver calls si_backplane_access() from three contexts: tasklet
6126 * (that may call dhdpcie_sssr_dump() from dhdpcie_sssr_dump()), iovar
6127 * ("sbreg", "membytes", etc.) and procfs (used by GDB proxy). To avoid race
6128 * conditions calls of si_backplane_access() shall be serialized. Presence of
6129 * tasklet context implies that serialization shall be based on spinlock. Hence
6130 * Linux implementation of dhd_pcie_backplane_access_[un]lock() is
6133 * Other platforms may add their own implementations of
6134 * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not
6135 * needed implementation might be empty)
6138 serialized_backplane_access(dhd_bus_t
*bus
, uint addr
, uint size
, uint
*val
, bool read
)
6141 unsigned long flags
;
6142 DHD_BACKPLANE_ACCESS_LOCK(bus
->backplane_access_lock
, flags
);
6143 ret
= si_backplane_access(bus
->sih
, addr
, size
, val
, read
);
6144 DHD_BACKPLANE_ACCESS_UNLOCK(bus
->backplane_access_lock
, flags
);
6149 * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
6151 * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
6152 * @param params input buffer
6153 * @param plen length in [bytes] of input buffer 'params'
6154 * @param arg output buffer
6155 * @param len length in [bytes] of output buffer 'arg'
6158 dhdpcie_bus_doiovar(dhd_bus_t
*bus
, const bcm_iovar_t
*vi
, uint32 actionid
, const char *name
,
6159 void *params
, uint plen
, void *arg
, uint len
, int val_size
)
6167 DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
6168 __FUNCTION__
, actionid
, name
, params
, plen
, arg
, len
, val_size
));
6170 if ((bcmerror
= bcm_iovar_lencheck(vi
, arg
, len
, IOV_ISSET(actionid
))) != 0)
6173 if (plen
>= sizeof(int_val
))
6174 bcopy(params
, &int_val
, sizeof(int_val
));
6176 if (plen
>= sizeof(int_val
) * 2)
6177 bcopy((void*)((uintptr
)params
+ sizeof(int_val
)), &int_val2
, sizeof(int_val2
));
6179 if (plen
>= sizeof(int_val
) * 3)
6180 bcopy((void*)((uintptr
)params
+ 2 * sizeof(int_val
)), &int_val3
, sizeof(int_val3
));
6182 bool_val
= (int_val
!= 0) ? TRUE
: FALSE
;
6184 /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
6185 if (bus
->dhd
->dongle_reset
&& !(actionid
== IOV_SVAL(IOV_DEVRESET
) ||
6186 actionid
== IOV_GVAL(IOV_DEVRESET
))) {
6187 bcmerror
= BCME_NOTREADY
;
6193 case IOV_SVAL(IOV_VARS
):
6194 bcmerror
= dhdpcie_downloadvars(bus
, arg
, len
);
6196 case IOV_SVAL(IOV_PCIE_LPBK
):
6197 bcmerror
= dhdpcie_bus_lpback_req(bus
, int_val
);
6200 case IOV_SVAL(IOV_PCIE_DMAXFER
): {
6201 dma_xfer_info_t
*dmaxfer
= (dma_xfer_info_t
*)arg
;
6205 if (dmaxfer
->version
!= DHD_DMAXFER_VERSION
)
6206 return BCME_VERSION
;
6207 if (dmaxfer
->length
!= sizeof(dma_xfer_info_t
)) {
6211 bcmerror
= dhdpcie_bus_dmaxfer_req(bus
, dmaxfer
->num_bytes
,
6212 dmaxfer
->src_delay
, dmaxfer
->dest_delay
,
6213 dmaxfer
->type
, dmaxfer
->core_num
,
6214 dmaxfer
->should_wait
);
6216 if (dmaxfer
->should_wait
&& bcmerror
>= 0) {
6217 bcmerror
= dhdmsgbuf_dmaxfer_status(bus
->dhd
, dmaxfer
);
6222 case IOV_GVAL(IOV_PCIE_DMAXFER
): {
6223 dma_xfer_info_t
*dmaxfer
= (dma_xfer_info_t
*)params
;
6226 if (dmaxfer
->version
!= DHD_DMAXFER_VERSION
)
6227 return BCME_VERSION
;
6228 if (dmaxfer
->length
!= sizeof(dma_xfer_info_t
)) {
6231 bcmerror
= dhdmsgbuf_dmaxfer_status(bus
->dhd
, dmaxfer
);
6236 case IOV_GVAL(IOV_INB_DW_ENABLE
):
6237 int_val
= bus
->inb_enabled
;
6238 bcopy(&int_val
, arg
, val_size
);
6240 case IOV_SVAL(IOV_INB_DW_ENABLE
):
6241 bus
->inb_enabled
= (bool)int_val
;
6243 #endif /* PCIE_INB_DW */
6244 #if defined(PCIE_INB_DW)
6245 case IOV_GVAL(IOV_DEEP_SLEEP
):
6246 int_val
= bus
->ds_enabled
;
6247 bcopy(&int_val
, arg
, val_size
);
6250 case IOV_SVAL(IOV_DEEP_SLEEP
):
6252 if (!bus
->ds_enabled
) {
6253 bus
->ds_enabled
= TRUE
;
6255 if (dhd_bus_set_device_wake(bus
, FALSE
) == BCME_OK
) {
6257 if (INBAND_DW_ENAB(bus
)) {
6259 timeleft
= dhd_os_ds_enter_wait(bus
->dhd
, NULL
);
6260 if (timeleft
== 0) {
6261 DHD_ERROR(("DS-ENTER timeout\n"));
6262 bus
->ds_enabled
= FALSE
;
6266 #endif /* PCIE_INB_DW */
6269 DHD_ERROR(("%s: Enable Deep Sleep failed !\n",
6271 bus
->ds_enabled
= FALSE
;
6274 DHD_ERROR(("%s: Deep Sleep already enabled !\n", __FUNCTION__
));
6277 else if (int_val
== 0) {
6278 if (bus
->ds_enabled
) {
6279 bus
->calc_ds_exit_latency
= TRUE
;
6281 if (dhd_bus_set_device_wake(bus
, TRUE
) == BCME_OK
) {
6282 bus
->ds_enabled
= FALSE
;
6283 if (INBAND_DW_ENAB(bus
)) {
6284 if (bus
->ds_exit_latency
!= 0) {
6285 DHD_ERROR(("DS-EXIT latency = %llu us\n",
6286 bus
->ds_exit_latency
));
6288 DHD_ERROR(("Failed to measure DS-EXIT"
6289 " latency!(Possibly a non"
6290 " waitable context)\n"));
6294 DHD_ERROR(("%s: Disable Deep Sleep failed !\n",
6297 bus
->calc_ds_exit_latency
= FALSE
;
6299 DHD_ERROR(("%s: Deep Sleep already disabled !\n", __FUNCTION__
));
6303 DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__
));
6307 case IOV_GVAL(IOV_PCIE_SUSPEND
):
6308 int_val
= (bus
->dhd
->busstate
== DHD_BUS_SUSPEND
) ? 1 : 0;
6309 bcopy(&int_val
, arg
, val_size
);
6312 case IOV_SVAL(IOV_PCIE_SUSPEND
):
6313 if (bool_val
) { /* Suspend */
6315 unsigned long flags
;
6318 * If some other context is busy, wait until they are done,
6319 * before starting suspend
6321 ret
= dhd_os_busbusy_wait_condition(bus
->dhd
,
6322 &bus
->dhd
->dhd_bus_busy_state
, DHD_BUS_BUSY_IN_DHD_IOVAR
);
6324 DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
6325 __FUNCTION__
, bus
->dhd
->dhd_bus_busy_state
));
6329 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
6330 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus
->dhd
);
6331 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
6332 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6333 dhdpcie_bus_suspend(bus
, TRUE
, TRUE
);
6335 dhdpcie_bus_suspend(bus
, TRUE
);
6336 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6338 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
6339 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus
->dhd
);
6340 dhd_os_busbusy_wake(bus
->dhd
);
6341 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
6342 } else { /* Resume */
6343 unsigned long flags
;
6344 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
6345 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus
->dhd
);
6346 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
6348 dhdpcie_bus_suspend(bus
, FALSE
);
6350 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
6351 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus
->dhd
);
6352 dhd_os_busbusy_wake(bus
->dhd
);
6353 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
6357 case IOV_GVAL(IOV_MEMSIZE
):
6358 int_val
= (int32
)bus
->ramsize
;
6359 bcopy(&int_val
, arg
, val_size
);
6362 /* Debug related. Dumps core registers or one of the dongle memory */
6363 case IOV_GVAL(IOV_DUMP_DONGLE
):
6365 dump_dongle_in_t ddi
= *(dump_dongle_in_t
*)params
;
6366 dump_dongle_out_t
*ddo
= (dump_dongle_out_t
*)arg
;
6367 uint32
*p
= ddo
->val
;
6368 const uint max_offset
= 4096 - 1; /* one core contains max 4096/4 registers */
6370 if (plen
< sizeof(ddi
) || len
< sizeof(ddo
)) {
6371 bcmerror
= BCME_BADARG
;
6376 case DUMP_DONGLE_COREREG
:
6379 if (si_setcoreidx(bus
->sih
, ddi
.index
) == NULL
) {
6380 break; // beyond last core: core enumeration ended
6383 ddo
->address
= si_addrspace(bus
->sih
, CORE_SLAVE_PORT_0
, CORE_BASE_ADDR_0
);
6384 ddo
->address
+= ddi
.offset
; // BP address at which this dump starts
6386 ddo
->id
= si_coreid(bus
->sih
);
6387 ddo
->rev
= si_corerev(bus
->sih
);
6389 while (ddi
.offset
< max_offset
&&
6390 sizeof(dump_dongle_out_t
) + ddo
->n_bytes
< (uint
)len
) {
6391 *p
++ = si_corereg(bus
->sih
, ddi
.index
, ddi
.offset
, 0, 0);
6392 ddi
.offset
+= sizeof(uint32
);
6393 ddo
->n_bytes
+= sizeof(uint32
);
6397 // TODO: implement d11 SHM/TPL dumping
6398 bcmerror
= BCME_BADARG
;
6404 /* Debug related. Returns a string with dongle capabilities */
6405 case IOV_GVAL(IOV_DNGL_CAPS
):
6407 strlcpy(arg
, bus
->dhd
->fw_capabilities
, (size_t)len
);
6411 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
6412 case IOV_SVAL(IOV_GDB_SERVER
):
6413 /* debugger_*() functions may sleep, so cannot hold spinlock */
6415 debugger_init((void *) bus
, &bus_ops
, int_val
, SI_ENUM_BASE(bus
->sih
));
6420 #endif /* DEBUGGER || DHD_DSCOPE */
6421 #if defined(GDB_PROXY)
6422 case IOV_GVAL(IOV_GDB_PROXY_PROBE
):
6424 dhd_gdb_proxy_probe_data_t ret
;
6425 ret
.data_len
= (uint32
)sizeof(ret
);
6426 ret
.magic
= DHD_IOCTL_MAGIC
;
6428 if (bus
->gdb_proxy_access_enabled
) {
6429 ret
.flags
|= DHD_GDB_PROXY_PROBE_ACCESS_ENABLED
;
6430 if (bus
->dhd
->busstate
< DHD_BUS_LOAD
) {
6431 ret
.flags
|= DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING
;
6433 ret
.flags
|= DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING
;
6436 if (bus
->gdb_proxy_bootloader_mode
) {
6437 ret
.flags
|= DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE
;
6439 ret
.last_id
= bus
->gdb_proxy_last_id
;
6440 if (plen
&& int_val
) {
6441 bus
->gdb_proxy_last_id
= (uint32
)int_val
;
6443 if (len
>= sizeof(ret
)) {
6444 bcopy(&ret
, arg
, sizeof(ret
));
6445 bus
->dhd
->gdb_proxy_active
= TRUE
;
6447 bcmerror
= BCME_BADARG
;
6451 case IOV_GVAL(IOV_GDB_PROXY_STOP_COUNT
):
6452 int_val
= (int32
)bus
->dhd
->gdb_proxy_stop_count
;
6453 bcopy(&int_val
, arg
, sizeof(int_val
));
6455 case IOV_SVAL(IOV_GDB_PROXY_STOP_COUNT
):
6456 bus
->dhd
->gdb_proxy_stop_count
= (uint32
)int_val
;
6458 #endif /* GDB_PROXY */
6461 /* Dump dongle side buzzz trace to console */
6462 case IOV_GVAL(IOV_BUZZZ_DUMP
):
6463 bcmerror
= dhd_buzzz_dump_dngl(bus
);
6465 #endif /* BCM_BUZZZ */
6467 case IOV_SVAL(IOV_SET_DOWNLOAD_STATE
):
6468 bcmerror
= dhdpcie_bus_download_state(bus
, bool_val
);
6471 #if defined(FW_SIGNATURE)
6472 case IOV_SVAL(IOV_SET_DOWNLOAD_INFO
):
6474 fw_download_info_t
*info
= (fw_download_info_t
*)params
;
6475 DHD_INFO(("dwnldinfo: sig=%s fw=%x,%u bl=%s,0x%x\n",
6476 info
->fw_signature_fname
,
6477 info
->fw_start_addr
, info
->fw_size
,
6478 info
->bootloader_fname
, info
->bootloader_start_addr
));
6479 bcmerror
= dhdpcie_bus_save_download_info(bus
,
6480 info
->fw_start_addr
, info
->fw_size
, info
->fw_signature_fname
,
6481 info
->bootloader_fname
, info
->bootloader_start_addr
);
6484 #endif /* FW_SIGNATURE */
6486 case IOV_GVAL(IOV_RAMSIZE
):
6487 int_val
= (int32
)bus
->ramsize
;
6488 bcopy(&int_val
, arg
, val_size
);
6491 case IOV_SVAL(IOV_RAMSIZE
):
6492 bus
->ramsize
= int_val
;
6493 bus
->orig_ramsize
= int_val
;
6496 case IOV_GVAL(IOV_RAMSTART
):
6497 int_val
= (int32
)bus
->dongle_ram_base
;
6498 bcopy(&int_val
, arg
, val_size
);
6501 case IOV_GVAL(IOV_CC_NVMSHADOW
):
6503 struct bcmstrbuf dump_b
;
6505 bcm_binit(&dump_b
, arg
, len
);
6506 bcmerror
= dhdpcie_cc_nvmshadow(bus
, &dump_b
);
6510 case IOV_GVAL(IOV_SLEEP_ALLOWED
):
6511 bool_val
= bus
->sleep_allowed
;
6512 bcopy(&bool_val
, arg
, val_size
);
6515 case IOV_SVAL(IOV_SLEEP_ALLOWED
):
6516 bus
->sleep_allowed
= bool_val
;
6519 case IOV_GVAL(IOV_DONGLEISOLATION
):
6520 int_val
= bus
->dhd
->dongle_isolation
;
6521 bcopy(&int_val
, arg
, val_size
);
6524 case IOV_SVAL(IOV_DONGLEISOLATION
):
6525 bus
->dhd
->dongle_isolation
= bool_val
;
6528 case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD
):
6529 int_val
= bus
->ltrsleep_on_unload
;
6530 bcopy(&int_val
, arg
, val_size
);
6533 case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD
):
6534 bus
->ltrsleep_on_unload
= bool_val
;
6537 case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK
):
6539 struct bcmstrbuf dump_b
;
6540 bcm_binit(&dump_b
, arg
, len
);
6541 bcmerror
= dhd_prot_ringupd_dump(bus
->dhd
, &dump_b
);
6544 case IOV_GVAL(IOV_DMA_RINGINDICES
):
6546 int_val
= dhdpcie_get_dma_ring_indices(bus
->dhd
);
6547 bcopy(&int_val
, arg
, sizeof(int_val
));
6550 case IOV_SVAL(IOV_DMA_RINGINDICES
):
6551 bcmerror
= dhdpcie_set_dma_ring_indices(bus
->dhd
, int_val
);
6554 case IOV_GVAL(IOV_METADATA_DBG
):
6555 int_val
= dhd_prot_metadata_dbg_get(bus
->dhd
);
6556 bcopy(&int_val
, arg
, val_size
);
6558 case IOV_SVAL(IOV_METADATA_DBG
):
6559 dhd_prot_metadata_dbg_set(bus
->dhd
, (int_val
!= 0));
6562 case IOV_GVAL(IOV_RX_METADATALEN
):
6563 int_val
= dhd_prot_metadatalen_get(bus
->dhd
, TRUE
);
6564 bcopy(&int_val
, arg
, val_size
);
6567 case IOV_SVAL(IOV_RX_METADATALEN
):
6569 bcmerror
= BCME_BUFTOOLONG
;
6572 dhd_prot_metadatalen_set(bus
->dhd
, int_val
, TRUE
);
6575 case IOV_SVAL(IOV_TXP_THRESHOLD
):
6576 dhd_prot_txp_threshold(bus
->dhd
, TRUE
, int_val
);
6579 case IOV_GVAL(IOV_TXP_THRESHOLD
):
6580 int_val
= dhd_prot_txp_threshold(bus
->dhd
, FALSE
, int_val
);
6581 bcopy(&int_val
, arg
, val_size
);
6584 case IOV_SVAL(IOV_DB1_FOR_MB
):
6586 bus
->db1_for_mb
= TRUE
;
6588 bus
->db1_for_mb
= FALSE
;
6591 case IOV_GVAL(IOV_DB1_FOR_MB
):
6592 if (bus
->db1_for_mb
)
6596 bcopy(&int_val
, arg
, val_size
);
6599 case IOV_GVAL(IOV_TX_METADATALEN
):
6600 int_val
= dhd_prot_metadatalen_get(bus
->dhd
, FALSE
);
6601 bcopy(&int_val
, arg
, val_size
);
6604 case IOV_SVAL(IOV_TX_METADATALEN
):
6606 bcmerror
= BCME_BUFTOOLONG
;
6609 dhd_prot_metadatalen_set(bus
->dhd
, int_val
, FALSE
);
6612 case IOV_SVAL(IOV_DEVRESET
):
6614 devreset_info_t
*devreset
= (devreset_info_t
*)arg
;
6620 if (devreset
->length
== sizeof(devreset_info_t
)) {
6621 if (devreset
->version
!= DHD_DEVRESET_VERSION
) {
6622 return BCME_VERSION
;
6624 int_val
= devreset
->mode
;
6628 case DHD_BUS_DEVRESET_ON
:
6629 bcmerror
= dhd_bus_devreset(bus
->dhd
, (uint8
)int_val
);
6631 case DHD_BUS_DEVRESET_OFF
:
6632 bcmerror
= dhd_bus_devreset(bus
->dhd
, (uint8
)int_val
);
6634 case DHD_BUS_DEVRESET_FLR
:
6635 bcmerror
= dhd_bus_perform_flr(bus
, bus
->flr_force_fail
);
6637 case DHD_BUS_DEVRESET_FLR_FORCE_FAIL
:
6638 bus
->flr_force_fail
= TRUE
;
6641 DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__
));
6646 case IOV_SVAL(IOV_FORCE_FW_TRAP
):
6647 if (bus
->dhd
->busstate
== DHD_BUS_DATA
)
6648 dhdpcie_fw_trap(bus
);
6650 DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__
));
6651 bcmerror
= BCME_NOTUP
;
6654 case IOV_GVAL(IOV_FLOW_PRIO_MAP
):
6655 int_val
= bus
->dhd
->flow_prio_map_type
;
6656 bcopy(&int_val
, arg
, val_size
);
6659 case IOV_SVAL(IOV_FLOW_PRIO_MAP
):
6660 int_val
= (int32
)dhd_update_flow_prio_map(bus
->dhd
, (uint8
)int_val
);
6661 bcopy(&int_val
, arg
, val_size
);
6664 #ifdef DHD_PCIE_RUNTIMEPM
6665 case IOV_GVAL(IOV_IDLETIME
):
6666 if (!(bus
->dhd
->op_mode
& DHD_FLAG_MFG_MODE
)) {
6667 int_val
= bus
->idletime
;
6671 bcopy(&int_val
, arg
, val_size
);
6674 case IOV_SVAL(IOV_IDLETIME
):
6676 bcmerror
= BCME_BADARG
;
6678 bus
->idletime
= int_val
;
6679 if (bus
->idletime
) {
6680 DHD_ENABLE_RUNTIME_PM(bus
->dhd
);
6682 DHD_DISABLE_RUNTIME_PM(bus
->dhd
);
6686 #endif /* DHD_PCIE_RUNTIMEPM */
6688 case IOV_GVAL(IOV_TXBOUND
):
6689 int_val
= (int32
)dhd_txbound
;
6690 bcopy(&int_val
, arg
, val_size
);
6693 case IOV_SVAL(IOV_TXBOUND
):
6694 dhd_txbound
= (uint
)int_val
;
6697 case IOV_SVAL(IOV_H2D_MAILBOXDATA
):
6698 dhdpcie_send_mb_data(bus
, (uint
)int_val
);
6701 case IOV_SVAL(IOV_INFORINGS
):
6702 dhd_prot_init_info_rings(bus
->dhd
);
6705 case IOV_SVAL(IOV_H2D_PHASE
):
6706 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
6707 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6709 bcmerror
= BCME_NOTDOWN
;
6713 bus
->dhd
->h2d_phase_supported
= TRUE
;
6715 bus
->dhd
->h2d_phase_supported
= FALSE
;
6718 case IOV_GVAL(IOV_H2D_PHASE
):
6719 int_val
= (int32
) bus
->dhd
->h2d_phase_supported
;
6720 bcopy(&int_val
, arg
, val_size
);
6723 case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE
):
6724 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
6725 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6727 bcmerror
= BCME_NOTDOWN
;
6731 bus
->dhd
->force_dongletrap_on_bad_h2d_phase
= TRUE
;
6733 bus
->dhd
->force_dongletrap_on_bad_h2d_phase
= FALSE
;
6736 case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE
):
6737 int_val
= (int32
) bus
->dhd
->force_dongletrap_on_bad_h2d_phase
;
6738 bcopy(&int_val
, arg
, val_size
);
6741 case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM
):
6742 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
6743 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6745 bcmerror
= BCME_NOTDOWN
;
6748 dhd_prot_set_h2d_max_txpost(bus
->dhd
, (uint16
)int_val
);
6751 case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM
):
6752 int_val
= dhd_prot_get_h2d_max_txpost(bus
->dhd
);
6753 bcopy(&int_val
, arg
, val_size
);
6756 #if defined(DHD_HTPUT_TUNABLES)
6757 case IOV_SVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM
):
6758 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
6759 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6761 bcmerror
= BCME_NOTDOWN
;
6764 dhd_prot_set_h2d_htput_max_txpost(bus
->dhd
, (uint16
)int_val
);
6767 case IOV_GVAL(IOV_H2D_HTPUT_TXPOST_MAX_ITEM
):
6768 int_val
= dhd_prot_get_h2d_htput_max_txpost(bus
->dhd
);
6769 bcopy(&int_val
, arg
, val_size
);
6771 #endif /* DHD_HTPUT_TUNABLES */
6773 case IOV_GVAL(IOV_RXBOUND
):
6774 int_val
= (int32
)dhd_rxbound
;
6775 bcopy(&int_val
, arg
, val_size
);
6778 case IOV_SVAL(IOV_RXBOUND
):
6779 dhd_rxbound
= (uint
)int_val
;
6782 case IOV_GVAL(IOV_TRAPDATA
):
6784 struct bcmstrbuf dump_b
;
6785 bcm_binit(&dump_b
, arg
, len
);
6786 bcmerror
= dhd_prot_dump_extended_trap(bus
->dhd
, &dump_b
, FALSE
);
6790 case IOV_GVAL(IOV_TRAPDATA_RAW
):
6792 struct bcmstrbuf dump_b
;
6793 bcm_binit(&dump_b
, arg
, len
);
6794 bcmerror
= dhd_prot_dump_extended_trap(bus
->dhd
, &dump_b
, TRUE
);
6798 case IOV_SVAL(IOV_HANGREPORT
):
6799 bus
->dhd
->hang_report
= bool_val
;
6800 DHD_ERROR(("%s: Set hang_report as %d\n",
6801 __FUNCTION__
, bus
->dhd
->hang_report
));
6804 case IOV_GVAL(IOV_HANGREPORT
):
6805 int_val
= (int32
)bus
->dhd
->hang_report
;
6806 bcopy(&int_val
, arg
, val_size
);
6809 case IOV_SVAL(IOV_CTO_PREVENTION
):
6810 bcmerror
= dhdpcie_cto_init(bus
, bool_val
);
6813 case IOV_GVAL(IOV_CTO_PREVENTION
):
6814 if (bus
->sih
->buscorerev
< 19) {
6815 bcmerror
= BCME_UNSUPPORTED
;
6818 int_val
= (int32
)bus
->cto_enable
;
6819 bcopy(&int_val
, arg
, val_size
);
6822 case IOV_SVAL(IOV_CTO_THRESHOLD
):
6824 if (bus
->sih
->buscorerev
< 19) {
6825 bcmerror
= BCME_UNSUPPORTED
;
6828 bus
->cto_threshold
= (uint32
)int_val
;
6832 case IOV_GVAL(IOV_CTO_THRESHOLD
):
6833 if (bus
->sih
->buscorerev
< 19) {
6834 bcmerror
= BCME_UNSUPPORTED
;
6837 if (bus
->cto_threshold
) {
6838 int_val
= (int32
)bus
->cto_threshold
;
6840 int_val
= pcie_cto_to_thresh_default(bus
->sih
->buscorerev
);
6843 bcopy(&int_val
, arg
, val_size
);
6846 case IOV_SVAL(IOV_PCIE_WD_RESET
):
6848 /* Legacy chipcommon watchdog reset */
6849 dhdpcie_cc_watchdog_reset(bus
);
6853 case IOV_GVAL(IOV_HWA_ENABLE
):
6854 int_val
= bus
->hwa_enabled
;
6855 bcopy(&int_val
, arg
, val_size
);
6857 case IOV_SVAL(IOV_HWA_ENABLE
):
6858 bus
->hwa_enabled
= (bool)int_val
;
6860 case IOV_GVAL(IOV_IDMA_ENABLE
):
6861 int_val
= bus
->idma_enabled
;
6862 bcopy(&int_val
, arg
, val_size
);
6864 case IOV_SVAL(IOV_IDMA_ENABLE
):
6865 bus
->idma_enabled
= (bool)int_val
;
6867 case IOV_GVAL(IOV_IFRM_ENABLE
):
6868 int_val
= bus
->ifrm_enabled
;
6869 bcopy(&int_val
, arg
, val_size
);
6871 case IOV_SVAL(IOV_IFRM_ENABLE
):
6872 bus
->ifrm_enabled
= (bool)int_val
;
6874 case IOV_GVAL(IOV_CLEAR_RING
):
6875 bcopy(&int_val
, arg
, val_size
);
6876 dhd_flow_rings_flush(bus
->dhd
, 0);
6878 case IOV_GVAL(IOV_DAR_ENABLE
):
6879 int_val
= bus
->dar_enabled
;
6880 bcopy(&int_val
, arg
, val_size
);
6882 case IOV_SVAL(IOV_DAR_ENABLE
):
6883 bus
->dar_enabled
= (bool)int_val
;
6885 case IOV_GVAL(IOV_HSCBSIZE
):
6886 bcmerror
= dhd_get_hscb_info(bus
->dhd
, NULL
, (uint32
*)arg
);
6889 case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL
):
6890 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
6891 return BCME_NOTDOWN
;
6894 bus
->dhd
->extdtxs_in_txcpl
= TRUE
;
6896 bus
->dhd
->extdtxs_in_txcpl
= FALSE
;
6899 case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL
):
6900 int_val
= bus
->dhd
->extdtxs_in_txcpl
;
6901 bcopy(&int_val
, arg
, val_size
);
6904 case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT
):
6905 if (bus
->dhd
->busstate
!= DHD_BUS_DOWN
) {
6906 return BCME_NOTDOWN
;
6909 bus
->dhd
->hostrdy_after_init
= TRUE
;
6911 bus
->dhd
->hostrdy_after_init
= FALSE
;
6914 case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT
):
6915 int_val
= bus
->dhd
->hostrdy_after_init
;
6916 bcopy(&int_val
, arg
, val_size
);
6920 bcmerror
= BCME_UNSUPPORTED
;
6926 } /* dhdpcie_bus_doiovar */
6928 /** Transfers bytes from host to dongle using pio mode */
6930 dhdpcie_bus_lpback_req(struct dhd_bus
*bus
, uint32 len
)
6932 if (bus
->dhd
== NULL
) {
6933 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__
));
6936 if (bus
->dhd
->prot
== NULL
) {
6937 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__
));
6940 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
6941 DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__
));
6944 dhdmsgbuf_lpbk_req(bus
->dhd
, len
);
6949 dhd_bus_dump_dar_registers(struct dhd_bus
*bus
)
6951 uint32 dar_clk_ctrl_val
, dar_pwr_ctrl_val
, dar_intstat_val
,
6952 dar_errlog_val
, dar_erraddr_val
, dar_pcie_mbint_val
;
6953 uint32 dar_clk_ctrl_reg
, dar_pwr_ctrl_reg
, dar_intstat_reg
,
6954 dar_errlog_reg
, dar_erraddr_reg
, dar_pcie_mbint_reg
;
6956 if (bus
->is_linkdown
) {
6957 DHD_ERROR(("%s: link is down\n", __FUNCTION__
));
6961 if (DAR_PWRREQ(bus
)) {
6962 dhd_bus_pcie_pwr_req(bus
);
6965 dar_clk_ctrl_reg
= (uint32
)DAR_CLK_CTRL(bus
->sih
->buscorerev
);
6966 dar_pwr_ctrl_reg
= (uint32
)DAR_PCIE_PWR_CTRL(bus
->sih
->buscorerev
);
6967 dar_intstat_reg
= (uint32
)DAR_INTSTAT(bus
->sih
->buscorerev
);
6968 dar_errlog_reg
= (uint32
)DAR_ERRLOG(bus
->sih
->buscorerev
);
6969 dar_erraddr_reg
= (uint32
)DAR_ERRADDR(bus
->sih
->buscorerev
);
6970 dar_pcie_mbint_reg
= (uint32
)DAR_PCIMailBoxInt(bus
->sih
->buscorerev
);
6972 if (bus
->sih
->buscorerev
< 24) {
6973 DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
6974 __FUNCTION__
, bus
->sih
->buscorerev
));
6978 dar_clk_ctrl_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dar_clk_ctrl_reg
, 0, 0);
6979 dar_pwr_ctrl_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dar_pwr_ctrl_reg
, 0, 0);
6980 dar_intstat_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dar_intstat_reg
, 0, 0);
6981 dar_errlog_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dar_errlog_reg
, 0, 0);
6982 dar_erraddr_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dar_erraddr_reg
, 0, 0);
6983 dar_pcie_mbint_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dar_pcie_mbint_reg
, 0, 0);
6985 DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
6986 __FUNCTION__
, dar_clk_ctrl_reg
, dar_clk_ctrl_val
,
6987 dar_pwr_ctrl_reg
, dar_pwr_ctrl_val
, dar_intstat_reg
, dar_intstat_val
));
6989 DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
6990 __FUNCTION__
, dar_errlog_reg
, dar_errlog_val
,
6991 dar_erraddr_reg
, dar_erraddr_val
, dar_pcie_mbint_reg
, dar_pcie_mbint_val
));
6994 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
6996 dhd_bus_hostready(struct dhd_bus
*bus
)
6998 if (!bus
->dhd
->d2h_hostrdy_supported
) {
7002 if (bus
->is_linkdown
) {
7003 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
7007 DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__
,
7008 dhd_pcie_config_read(bus
, PCI_CFG_CMD
, sizeof(uint32
))));
7010 dhd_bus_dump_dar_registers(bus
);
7012 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db1_addr_get(bus
), ~0, 0x12345678);
7013 bus
->hostready_count
++;
7014 DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__
, bus
->hostready_count
));
7017 /* Clear INTSTATUS */
7019 dhdpcie_bus_clear_intstatus(struct dhd_bus
*bus
)
7021 uint32 intstatus
= 0;
7022 /* Skip after recieving D3 ACK */
7023 if (DHD_CHK_BUS_LPS_D3_ACKED(bus
)) {
7026 /* XXX: check for PCIE Gen2 also */
7027 if ((bus
->sih
->buscorerev
== 6) || (bus
->sih
->buscorerev
== 4) ||
7028 (bus
->sih
->buscorerev
== 2)) {
7029 intstatus
= dhdpcie_bus_cfg_read_dword(bus
, PCIIntstatus
, 4);
7030 dhdpcie_bus_cfg_write_dword(bus
, PCIIntstatus
, 4, intstatus
);
7032 /* this is a PCIE core register..not a config register... */
7033 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, 0, 0);
7034 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
, bus
->def_intmask
,
/*
 * Suspend/resume entry point for the PCIe bus.
 * state=TRUE requests suspend (send H2D_HOST_D3_INFORM and wait for D3 ACK),
 * state=FALSE requests resume (restore D0 and re-enable interrupts).
 * NOTE(review): this text is a lossy extraction -- interior lines (braces,
 * returns, local declarations, some #else/#endif markers) were dropped by
 * the extraction; the code below is left byte-identical, only comments added.
 */
7040 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7041 dhdpcie_bus_suspend(struct dhd_bus
*bus
, bool state
, bool byint
)
7043 dhdpcie_bus_suspend(struct dhd_bus
*bus
, bool state
)
7044 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7048 unsigned long flags
;
7049 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7050 int d3_read_retry
= 0;
7051 uint32 d2h_mb_data
= 0;
7053 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7055 printf("%s: state=%d\n", __FUNCTION__
, state
);
/* Guard clauses: bus and protocol layer must be inited, no pending bus errors. */
7056 if (bus
->dhd
== NULL
) {
7057 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__
));
7060 if (bus
->dhd
->prot
== NULL
) {
7061 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__
));
7065 if (dhd_query_bus_erros(bus
->dhd
)) {
7069 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7070 if (!(bus
->dhd
->busstate
== DHD_BUS_DATA
|| bus
->dhd
->busstate
== DHD_BUS_SUSPEND
)) {
7071 DHD_ERROR(("%s: not in a readystate\n", __FUNCTION__
));
7072 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7075 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7076 if (bus
->dhd
->dongle_reset
) {
7077 DHD_ERROR(("Dongle is in reset state.\n"));
7081 /* Check whether we are already in the requested state.
7082 * state=TRUE means Suspend
7083 * state=FALSE meanse Resume
7085 if (state
== TRUE
&& bus
->dhd
->busstate
== DHD_BUS_SUSPEND
) {
7086 DHD_ERROR(("Bus is already in SUSPEND state.\n"));
7088 } else if (state
== FALSE
&& bus
->dhd
->busstate
== DHD_BUS_DATA
) {
7089 DHD_ERROR(("Bus is already in RESUME state.\n"));
7097 if (bus
->is_linkdown
) {
7098 DHD_ERROR(("%s: PCIe link was down, state=%d\n",
7099 __FUNCTION__
, state
));
/* ---------------- Suspend path (state == TRUE) ---------------- */
7104 DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__
));
7106 bus
->dhd
->dhd_watchdog_ms_backup
= dhd_watchdog_ms
;
7107 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
7108 DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
7110 dhd_os_wd_timer(bus
->dhd
, 0);
7113 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7114 if (DHD_BUS_BUSY_CHECK_IN_TX(bus
->dhd
)) {
7115 DHD_ERROR(("Tx Request is not ended\n"));
7116 bus
->dhd
->busstate
= DHD_BUS_DATA
;
7117 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7121 bus
->last_suspend_start_time
= OSL_LOCALTIME_NS();
7123 /* stop all interface network queue. */
7124 dhd_bus_stop_queue(bus
);
7125 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/* Native runtime-PM variant: send D3_INFORM then poll/wait for the D3 ACK. */
7127 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7129 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
7130 /* Clear wait_for_d3_ack before sending D3_INFORM */
7131 bus
->wait_for_d3_ack
= 0;
7132 dhdpcie_send_mb_data(bus
, H2D_HOST_D3_INFORM
);
7134 timeleft
= dhd_os_d3ack_wait(bus
->dhd
, &bus
->wait_for_d3_ack
);
7135 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
7137 /* Clear wait_for_d3_ack before sending D3_INFORM */
7138 bus
->wait_for_d3_ack
= 0;
7139 dhdpcie_send_mb_data(bus
, H2D_HOST_D3_INFORM
| H2D_HOST_ACK_NOINT
);
7140 while (!bus
->wait_for_d3_ack
&& d3_read_retry
< MAX_D3_ACK_TIMEOUT
) {
7141 dhdpcie_handle_mb_data(bus
);
7142 usleep_range(1000, 1500);
7147 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
7150 /* As D3_INFORM will be sent after De-assert,
7151 * skip sending DS-ACK for DS-REQ.
7153 bus
->skip_ds_ack
= TRUE
;
7154 #endif /* PCIE_INB_DW */
7156 #if defined(PCIE_INB_DW)
7157 dhd_bus_set_device_wake(bus
, TRUE
);
7160 /* De-assert at this point for In-band device_wake */
7161 if (INBAND_DW_ENAB(bus
)) {
7162 dhd_bus_set_device_wake(bus
, FALSE
);
7163 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
7164 dhdpcie_bus_set_pcie_inband_dw_state(bus
, DW_DEVICE_HOST_SLEEP_WAIT
);
7165 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
7167 #endif /* PCIE_INB_DW */
7168 /* Clear wait_for_d3_ack before sending D3_INFORM */
7169 bus
->wait_for_d3_ack
= 0;
7171 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
7172 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
7173 * inside atomic context, so that no more DBs will be
7174 * rung after sending D3_INFORM
7177 if (INBAND_DW_ENAB(bus
)) {
7178 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
7179 dhdpcie_send_mb_data(bus
, H2D_HOST_D3_INFORM
);
7180 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
7182 #endif /* PCIE_INB_DW */
7184 dhdpcie_send_mb_data(bus
, H2D_HOST_D3_INFORM
);
7187 /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
7189 timeleft
= dhd_os_d3ack_wait(bus
->dhd
, &bus
->wait_for_d3_ack
);
7191 #ifdef DHD_RECOVER_TIMEOUT
7192 /* XXX: WAR for missing D3 ACK MB interrupt */
7193 if (bus
->wait_for_d3_ack
== 0) {
7194 /* If wait_for_d3_ack was not updated because D2H MB was not received */
7195 uint32 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
7196 bus
->pcie_mailbox_int
, 0, 0);
7197 int host_irq_disabled
= dhdpcie_irq_disabled(bus
);
7198 if ((intstatus
) && (intstatus
!= (uint32
)-1) &&
7199 (timeleft
== 0) && (!dhd_query_bus_erros(bus
->dhd
))) {
7200 DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
7201 " host_irq_disabled=%d\n",
7202 __FUNCTION__
, intstatus
, host_irq_disabled
));
7203 dhd_pcie_intr_count_dump(bus
->dhd
);
7204 dhd_print_tasklet_status(bus
->dhd
);
7205 if (bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
&&
7206 !bus
->use_mailbox
) {
7207 dhd_prot_process_ctrlbuf(bus
->dhd
);
7209 dhdpcie_handle_mb_data(bus
);
7211 timeleft
= dhd_os_d3ack_wait(bus
->dhd
, &bus
->wait_for_d3_ack
);
7212 /* Clear Interrupts */
7213 dhdpcie_bus_clear_intstatus(bus
);
7215 } /* bus->wait_for_d3_ack was 0 */
7216 #endif /* DHD_RECOVER_TIMEOUT */
7218 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
7219 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7221 /* To allow threads that got pre-empted to complete.
7223 while ((active
= dhd_os_check_wakelock_all(bus
->dhd
)) &&
7224 (idle_retry
< MAX_WKLK_IDLE_CHECK
)) {
/* D3 ACK received: suspend for real, unless held wakelocks force a back-off to D0. */
7229 if (bus
->wait_for_d3_ack
) {
7230 DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__
));
7231 /* Got D3 Ack. Suspend the bus */
7233 DHD_ERROR(("%s():Suspend failed because of wakelock"
7234 "restoring Dongle to D0\n", __FUNCTION__
));
7236 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
7237 DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
7239 dhd_os_wd_timer(bus
->dhd
,
7240 bus
->dhd
->dhd_watchdog_ms_backup
);
7244 * Dongle still thinks that it has to be in D3 state until
7245 * it gets a D0 Inform, but we are backing off from suspend.
7246 * Ensure that Dongle is brought back to D0.
7248 * Bringing back Dongle from D3 Ack state to D0 state is a
7249 * 2 step process. Dongle would want to know that D0 Inform
7250 * would be sent as a MB interrupt to bring it out of D3 Ack
7251 * state to D0 state. So we have to send both this message.
7254 /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
7255 bus
->wait_for_d3_ack
= 0;
7257 DHD_SET_BUS_NOT_IN_LPS(bus
);
7259 if (INBAND_DW_ENAB(bus
)) {
7260 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
7261 /* Since suspend has failed due to wakelock is held,
7262 * update the DS state to DW_DEVICE_HOST_WAKE_WAIT.
7263 * So that host sends the DS-ACK for DS-REQ.
7265 DHD_ERROR(("Suspend failed due to wakelock is held, "
7266 "set inband dw state to DW_DEVICE_HOST_WAKE_WAIT\n"));
7267 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
7268 DW_DEVICE_HOST_WAKE_WAIT
);
7269 dhd_bus_ds_trace(bus
, 0, TRUE
,
7270 dhdpcie_bus_get_pcie_inband_dw_state(bus
));
7271 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
7273 bus
->skip_ds_ack
= FALSE
;
7274 #endif /* PCIE_INB_DW */
7275 /* For Linux, Macos etc (otherthan NDIS) enable back the dongle
7276 * interrupts using intmask and host interrupts
7277 * which were disabled in the dhdpcie_bus_isr()->
7278 * dhd_bus_handle_d3_ack().
7280 /* Enable back interrupt using Intmask!! */
7281 dhdpcie_bus_intr_enable(bus
);
7282 /* Defer enabling host irq after RPM suspend failure */
7283 if (!DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus
->dhd
)) {
7284 /* Enable back interrupt from Host side!! */
7285 if (dhdpcie_irq_disabled(bus
)) {
7286 dhdpcie_enable_irq(bus
);
7287 bus
->resume_intr_enable_count
++;
7290 if (bus
->use_d0_inform
) {
7291 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
7292 dhdpcie_send_mb_data(bus
,
7293 (H2D_HOST_D0_INFORM_IN_USE
| H2D_HOST_D0_INFORM
));
7294 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
7296 /* ring doorbell 1 (hostready) */
7297 dhd_bus_hostready(bus
);
7299 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7300 bus
->dhd
->busstate
= DHD_BUS_DATA
;
7301 /* resume all interface network queue. */
7302 dhd_bus_start_queue(bus
);
7303 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7306 /* Actual Suspend after no wakelock */
7307 /* At this time bus->bus_low_power_state will be
7308 * made to DHD_BUS_D3_ACK_RECIEVED after recieving D3_ACK
7309 * in dhd_bus_handle_d3_ack()
7312 if (INBAND_DW_ENAB(bus
)) {
7313 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
7314 if (dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
7315 DW_DEVICE_HOST_SLEEP_WAIT
) {
7316 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
7317 DW_DEVICE_HOST_SLEEP
);
7319 dhd_bus_ds_trace(bus
, 0, TRUE
,
7320 dhdpcie_bus_get_pcie_inband_dw_state(bus
));
7322 dhd_bus_ds_trace(bus
, 0, TRUE
);
7323 #endif /* PCIE_INB_DW */
7325 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
7327 #endif /* PCIE_INB_DW */
7328 if (bus
->use_d0_inform
&&
7329 (bus
->api
.fw_rev
< PCIE_SHARED_VERSION_6
)) {
7330 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
7331 dhdpcie_send_mb_data(bus
, (H2D_HOST_D0_INFORM_IN_USE
));
7332 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
7335 #if defined(BCMPCIE_OOB_HOST_WAKE)
7336 if (bus
->dhd
->dhd_induce_error
== DHD_INDUCE_DROP_OOB_IRQ
) {
7337 DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__
));
7339 dhdpcie_oob_intr_set(bus
, TRUE
);
7341 #endif /* BCMPCIE_OOB_HOST_WAKE */
7343 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7344 /* The Host cannot process interrupts now so disable the same.
7345 * No need to disable the dongle INTR using intmask, as we are
7346 * already calling disabling INTRs from DPC context after
7347 * getting D3_ACK in dhd_bus_handle_d3_ack.
7348 * Code may not look symmetric between Suspend and
7349 * Resume paths but this is done to close down the timing window
7350 * between DPC and suspend context and bus->bus_low_power_state
7351 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
7353 bus
->dhd
->d3ackcnt_timeout
= 0;
7354 bus
->dhd
->busstate
= DHD_BUS_SUSPEND
;
7355 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7356 dhdpcie_dump_resource(bus
);
7357 rc
= dhdpcie_pci_suspend_resume(bus
, state
);
7359 bus
->last_suspend_end_time
= OSL_LOCALTIME_NS();
/* D3 ACK timed out: record debug state, dump console/coredump, raise HANG event. */
7362 } else if (timeleft
== 0) { /* D3 ACK Timeout */
7363 #ifdef DHD_FW_COREDUMP
7364 uint32 cur_memdump_mode
= bus
->dhd
->memdump_enabled
;
7365 #endif /* DHD_FW_COREDUMP */
7367 /* check if the D3 ACK timeout due to scheduling issue */
7368 bus
->dhd
->is_sched_error
= !dhd_query_bus_erros(bus
->dhd
) &&
7369 bus
->isr_entry_time
> bus
->last_d3_inform_time
&&
7370 dhd_bus_query_dpc_sched_errors(bus
->dhd
);
7371 bus
->dhd
->d3ack_timeout_occured
= TRUE
;
7372 /* If the D3 Ack has timeout */
7373 bus
->dhd
->d3ackcnt_timeout
++;
7374 DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
7375 __FUNCTION__
, bus
->dhd
->is_sched_error
?
7376 " due to scheduling problem" : "", bus
->dhd
->d3ackcnt_timeout
));
7377 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7378 /* XXX DHD triggers Kernel panic if the resumed on timeout occurrs
7379 * due to tasklet or workqueue scheduling problems in the Linux Kernel.
7380 * Customer informs that it is hard to find any clue from the
7381 * host memory dump since the important tasklet or workqueue information
7382 * is already disappered due the latency while printing out the timestamp
7383 * logs for debugging scan timeout issue.
7384 * For this reason, customer requestes us to trigger Kernel Panic rather
7385 * than taking a SOCRAM dump.
7387 if (bus
->dhd
->is_sched_error
&& cur_memdump_mode
== DUMP_MEMFILE_BUGON
) {
7388 /* change g_assert_type to trigger Kernel panic */
7390 /* use ASSERT() to trigger panic */
7393 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7394 DHD_SET_BUS_NOT_IN_LPS(bus
);
7396 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7397 bus
->dhd
->busstate
= DHD_BUS_DATA
;
7398 /* resume all interface network queue. */
7399 dhd_bus_start_queue(bus
);
7400 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7401 /* XXX : avoid multiple socram dump from dongle trap and
7402 * invalid PCIe bus assceess due to PCIe link down
7404 if (!bus
->dhd
->dongle_trap_occured
&&
7405 !bus
->is_linkdown
&&
7406 !bus
->cto_triggered
) {
7407 uint32 intstatus
= 0;
7409 /* Check if PCIe bus status is valid */
7410 intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
7411 bus
->pcie_mailbox_int
, 0, 0);
7412 if (intstatus
== (uint32
)-1) {
7413 /* Invalidate PCIe bus status */
7414 bus
->is_linkdown
= 1;
7417 dhd_bus_dump_console_buffer(bus
);
7418 dhd_prot_debug_info_print(bus
->dhd
);
7419 #ifdef DHD_FW_COREDUMP
7420 if (cur_memdump_mode
) {
7421 /* write core dump to file */
7422 bus
->dhd
->memdump_type
= DUMP_TYPE_D3_ACK_TIMEOUT
;
7423 dhdpcie_mem_dump(bus
);
7425 #endif /* DHD_FW_COREDUMP */
7427 DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
7429 #ifdef SUPPORT_LINKDOWN_RECOVERY
7430 #ifdef CONFIG_ARCH_MSM
7431 bus
->no_cfg_restore
= 1;
7432 #endif /* CONFIG_ARCH_MSM */
7433 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7434 dhd_os_check_hang(bus
->dhd
, 0, -ETIMEDOUT
);
7436 #if defined(DHD_ERPOM)
7437 dhd_schedule_reset(bus
->dhd
);
/* ---------------- Resume path (state == FALSE) ---------------- */
7443 DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__
));
7444 bus
->last_resume_start_time
= OSL_LOCALTIME_NS();
7447 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
7448 * si_backplane_access(function to read/write backplane)
7449 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
7450 * window being accessed is different form the window
7451 * being pointed by second_bar0win.
7452 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
7453 * invalidating second_bar0win after resume updates
7454 * PCIE2_BAR0_CORE2_WIN with right window.
7456 si_invalidate_second_bar0win(bus
->sih
);
7457 #if defined(BCMPCIE_OOB_HOST_WAKE)
7458 DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus
->dhd
);
7459 #endif /* BCMPCIE_OOB_HOST_WAKE */
7461 if (INBAND_DW_ENAB(bus
)) {
7462 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
7463 if (dhdpcie_bus_get_pcie_inband_dw_state(bus
) == DW_DEVICE_HOST_SLEEP
) {
7464 dhdpcie_bus_set_pcie_inband_dw_state(bus
, DW_DEVICE_HOST_WAKE_WAIT
);
7466 dhd_bus_ds_trace(bus
, 0, TRUE
,
7467 dhdpcie_bus_get_pcie_inband_dw_state(bus
));
7469 dhd_bus_ds_trace(bus
, 0, TRUE
);
7470 #endif /* PCIE_INB_DW */
7472 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
7474 bus
->skip_ds_ack
= FALSE
;
7475 #endif /* PCIE_INB_DW */
7476 rc
= dhdpcie_pci_suspend_resume(bus
, state
);
7477 dhdpcie_dump_resource(bus
);
7479 /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
7480 DHD_SET_BUS_NOT_IN_LPS(bus
);
7482 if (!rc
&& bus
->dhd
->busstate
== DHD_BUS_SUSPEND
) {
7483 if (bus
->use_d0_inform
) {
7484 DHD_OS_WAKE_LOCK_WAIVE(bus
->dhd
);
7485 dhdpcie_send_mb_data(bus
, (H2D_HOST_D0_INFORM
));
7486 DHD_OS_WAKE_LOCK_RESTORE(bus
->dhd
);
7488 /* ring doorbell 1 (hostready) */
7489 dhd_bus_hostready(bus
);
7491 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
7492 bus
->dhd
->busstate
= DHD_BUS_DATA
;
7493 #ifdef DHD_PCIE_RUNTIMEPM
7494 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus
->dhd
)) {
7497 wake_up(&bus
->rpm_queue
);
7499 #endif /* DHD_PCIE_RUNTIMEPM */
7500 /* resume all interface network queue. */
7501 dhd_bus_start_queue(bus
);
7503 /* For Linux, Macos etc (otherthan NDIS) enable back the dongle interrupts
7504 * using intmask and host interrupts
7505 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
7507 dhdpcie_bus_intr_enable(bus
); /* Enable back interrupt using Intmask!! */
7508 /* Defer enabling host interrupt until RPM resume done */
7509 if (!DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus
->dhd
)) {
7510 if (dhdpcie_irq_disabled(bus
)) {
7511 dhdpcie_enable_irq(bus
);
7512 bus
->resume_intr_enable_count
++;
7516 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
7518 if (bus
->dhd
->dhd_watchdog_ms_backup
) {
7519 DHD_ERROR(("%s: Enabling wdtick after resume\n",
7521 dhd_os_wd_timer(bus
->dhd
, bus
->dhd
->dhd_watchdog_ms_backup
);
7524 bus
->last_resume_end_time
= OSL_LOCALTIME_NS();
7526 /* Update TCM rd index for EDL ring */
7527 DHD_EDL_RING_TCM_RD_UPDATE(bus
->dhd
);
/* NOTE(review): the trailing 'return rc;' and closing brace of this function
 * were dropped by the extraction (original lines 7528-7531 are missing).
 */
7533 #define BUS_SUSPEND TRUE
7534 #define BUS_RESUME FALSE
7535 int dhd_bus_suspend(dhd_pub_t
*dhd
)
7538 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7539 /* TODO: Check whether the arguments are correct */
7540 ret
= dhdpcie_bus_suspend(dhd
->bus
, TRUE
, BUS_SUSPEND
);
7542 ret
= dhdpcie_bus_suspend(dhd
->bus
, BUS_SUSPEND
);
7547 int dhd_bus_resume(dhd_pub_t
*dhd
, int stage
)
7550 BCM_REFERENCE(stage
);
7552 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7553 /* TODO: Check whether the arguments are correct */
7554 ret
= dhdpcie_bus_suspend(dhd
->bus
, FALSE
, BUS_RESUME
);
7556 ret
= dhdpcie_bus_suspend(dhd
->bus
, BUS_RESUME
);
7562 dhdpcie_force_alp(struct dhd_bus
*bus
, bool enable
)
7564 ASSERT(bus
&& bus
->sih
);
7566 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
7567 OFFSETOF(sbpcieregs_t
, u
.pcie2
.clk_ctl_st
), CCS_FORCEALP
, CCS_FORCEALP
);
7569 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
7570 OFFSETOF(sbpcieregs_t
, u
.pcie2
.clk_ctl_st
), CCS_FORCEALP
, 0);
7575 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
7577 dhdpcie_set_l1_entry_time(struct dhd_bus
*bus
, int l1_entry_time
)
7581 ASSERT(bus
&& bus
->sih
);
7583 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, OFFSETOF(sbpcieregs_t
, configaddr
), ~0,
7585 reg_val
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
7586 OFFSETOF(sbpcieregs_t
, configdata
), 0, 0);
7587 reg_val
= (reg_val
& ~(0x7f << 16)) | ((l1_entry_time
& 0x7f) << 16);
7588 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, OFFSETOF(sbpcieregs_t
, configdata
), ~0,
7595 dhd_apply_d11_war_length(struct dhd_bus
*bus
, uint32 len
, uint32 d11_lpbk
)
7597 uint16 chipid
= si_chipid(bus
->sih
);
7599 * XXX: WAR for CRWLDOT11M-3011
7600 * program the DMA descriptor Buffer length as the expected frame length
7601 * + 8 bytes extra for corerev 82 when buffer length % 128 is equal to 4
7603 if ((chipid
== BCM4375_CHIP_ID
||
7604 chipid
== BCM4362_CHIP_ID
||
7605 chipid
== BCM4377_CHIP_ID
||
7606 chipid
== BCM43751_CHIP_ID
||
7607 chipid
== BCM43752_CHIP_ID
) &&
7608 (d11_lpbk
!= M2M_DMA_LPBK
&& d11_lpbk
!= M2M_NON_DMA_LPBK
)) {
7611 DHD_ERROR(("%s: len %d\n", __FUNCTION__
, len
));
7615 /** Transfers bytes from host to dongle and to host again using DMA */
7617 dhdpcie_bus_dmaxfer_req(struct dhd_bus
*bus
,
7618 uint32 len
, uint32 srcdelay
, uint32 destdelay
,
7619 uint32 d11_lpbk
, uint32 core_num
, uint32 wait
)
7623 if (bus
->dhd
== NULL
) {
7624 DHD_ERROR(("%s: bus not inited\n", __FUNCTION__
));
7627 if (bus
->dhd
->prot
== NULL
) {
7628 DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__
));
7631 if (bus
->dhd
->busstate
!= DHD_BUS_DATA
) {
7632 DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__
));
7636 if (len
< 5 || len
> 4194296) {
7637 DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__
));
7641 len
= dhd_apply_d11_war_length(bus
, len
, d11_lpbk
);
7643 bus
->dmaxfer_complete
= FALSE
;
7644 ret
= dhdmsgbuf_dmaxfer_req(bus
->dhd
, len
, srcdelay
, destdelay
,
7645 d11_lpbk
, core_num
);
7646 if (ret
!= BCME_OK
|| !wait
) {
7647 DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__
,
7650 ret
= dhd_os_dmaxfer_wait(bus
->dhd
, &bus
->dmaxfer_complete
);
7652 ret
= BCME_NOTREADY
;
7660 dhd_bus_is_multibp_capable(struct dhd_bus
*bus
)
7662 return MULTIBP_CAP(bus
->sih
);
7665 #define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
7666 #define PCIE_REV_FOR_4378B0 68
7669 dhdpcie_bus_download_state(dhd_bus_t
*bus
, bool enter
)
7672 volatile uint32
*cr4_regs
;
7674 bool do_wr_flops
= TRUE
;
7677 DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__
));
7681 do_flr
= ((bus
->sih
->buscorerev
!= PCIE_REV_FOR_4378A0
) &&
7682 (bus
->sih
->buscorerev
!= PCIE_REV_FOR_4378B0
));
7685 * Jira SWWLAN-214966: 4378B0 BToverPCIe: fails to download firmware
7686 * with "insmod dhd.ko firmware_path=rtecdc.bin nvram_path=nvram.txt" format
7687 * CTO is seen during autoload case.
7688 * Need to assert PD1 power req during ARM out of reset.
7689 * And doing FLR after this would conflict as FLR resets PCIe enum space.
7691 if (MULTIBP_ENAB(bus
->sih
) && !do_flr
) {
7692 dhd_bus_pcie_pwr_req(bus
);
7695 /* To enter download state, disable ARM and reset SOCRAM.
7696 * To exit download state, simply reset ARM (default is RAM boot).
7700 /* Make sure BAR1 maps to backplane address 0 */
7701 dhdpcie_setbar1win(bus
, 0x00000000);
7702 bus
->alp_only
= TRUE
;
7704 bus
->gdb_proxy_access_enabled
= TRUE
;
7705 bus
->gdb_proxy_bootloader_mode
= FALSE
;
7706 #endif /* GDB_PROXY */
7708 /* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
7709 cr4_regs
= si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0);
7711 if (cr4_regs
== NULL
&& !(si_setcore(bus
->sih
, ARM7S_CORE_ID
, 0)) &&
7712 !(si_setcore(bus
->sih
, ARMCM3_CORE_ID
, 0)) &&
7713 !(si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0))) {
7714 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__
));
7715 bcmerror
= BCME_ERROR
;
7719 if (si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0)) {
7720 /* Halt ARM & remove reset */
7721 si_core_reset(bus
->sih
, SICF_CPUHALT
, SICF_CPUHALT
);
7722 if (!(si_setcore(bus
->sih
, SYSMEM_CORE_ID
, 0))) {
7723 DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__
));
7724 bcmerror
= BCME_ERROR
;
7727 si_core_reset(bus
->sih
, 0, 0);
7728 /* reset last 4 bytes of RAM address. to be used for shared area */
7729 dhdpcie_init_shared_addr(bus
);
7730 } else if (cr4_regs
== NULL
) { /* no CR4 present on chip */
7731 si_core_disable(bus
->sih
, 0);
7733 if (!(si_setcore(bus
->sih
, SOCRAM_CORE_ID
, 0))) {
7734 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__
));
7735 bcmerror
= BCME_ERROR
;
7739 si_core_reset(bus
->sih
, 0, 0);
7741 /* Clear the top bit of memory */
7744 if (dhdpcie_bus_membytes(bus
, TRUE
, bus
->ramsize
- 4,
7745 (uint8
*)&zeros
, 4) < 0) {
7746 bcmerror
= BCME_ERROR
;
7754 * Read RAM base address [0x18_0000]
7755 * [next] Download firmware
7756 * [done at else] Populate the reset vector
7757 * [done at else] Remove ARM halt
7759 /* Halt ARM & remove reset */
7760 si_core_reset(bus
->sih
, SICF_CPUHALT
, SICF_CPUHALT
);
7761 if (BCM43602_CHIP(bus
->sih
->chip
)) {
7762 /* XXX CRWLARMCR4-53 43602a0 HW bug when banks are powered down */
7763 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKIDX
, 5);
7764 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKPDA
, 0);
7765 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKIDX
, 7);
7766 W_REG(bus
->pcie_mb_intr_osh
, cr4_regs
+ ARMCR4REG_BANKPDA
, 0);
7768 /* reset last 4 bytes of RAM address. to be used for shared area */
7769 dhdpcie_init_shared_addr(bus
);
7772 if (si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0)) {
7774 if ((bcmerror
= dhdpcie_bus_write_vars(bus
))) {
7775 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__
));
7778 /* write random numbers to sysmem for the purpose of
7779 * randomizing heap address space.
7781 if ((bcmerror
= dhdpcie_wrt_rnd(bus
)) != BCME_OK
) {
7782 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7787 #if defined(FW_SIGNATURE)
7788 if ((bcmerror
= dhdpcie_bus_download_fw_signature(bus
, &do_wr_flops
))
7792 #endif /* FW_SIGNATURE */
7795 /* switch back to arm core again */
7796 if (!(si_setcore(bus
->sih
, ARMCA7_CORE_ID
, 0))) {
7797 DHD_ERROR(("%s: Failed to find ARM CA7 core!\n",
7799 bcmerror
= BCME_ERROR
;
7802 /* write address 0 with reset instruction */
7803 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, 0,
7804 (uint8
*)&bus
->resetinstr
, sizeof(bus
->resetinstr
));
7805 /* now remove reset and halt and continue to run CA7 */
7807 } else if (!si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0)) {
7808 if (!(si_setcore(bus
->sih
, SOCRAM_CORE_ID
, 0))) {
7809 DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__
));
7810 bcmerror
= BCME_ERROR
;
7814 if (!si_iscoreup(bus
->sih
)) {
7815 DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__
));
7816 bcmerror
= BCME_ERROR
;
7820 /* Enable remap before ARM reset but after vars.
7821 * No backplane access in remap mode
7823 if (!si_setcore(bus
->sih
, PCMCIA_CORE_ID
, 0) &&
7824 !si_setcore(bus
->sih
, SDIOD_CORE_ID
, 0)) {
7825 DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__
));
7826 bcmerror
= BCME_ERROR
;
7830 /* XXX Change standby configuration here if necessary */
7832 if (!(si_setcore(bus
->sih
, ARM7S_CORE_ID
, 0)) &&
7833 !(si_setcore(bus
->sih
, ARMCM3_CORE_ID
, 0))) {
7834 DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__
));
7835 bcmerror
= BCME_ERROR
;
7839 if (BCM43602_CHIP(bus
->sih
->chip
)) {
7840 /* Firmware crashes on SOCSRAM access when core is in reset */
7841 if (!(si_setcore(bus
->sih
, SOCRAM_CORE_ID
, 0))) {
7842 DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
7844 bcmerror
= BCME_ERROR
;
7847 si_core_reset(bus
->sih
, 0, 0);
7848 si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0);
7852 if ((bcmerror
= dhdpcie_bus_write_vars(bus
))) {
7853 DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__
));
7857 /* write a random number rTLV to TCM for the purpose of
7858 * randomizing heap address space.
7860 if ((bcmerror
= dhdpcie_wrt_rnd(bus
)) != BCME_OK
) {
7861 DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7866 #if defined(FW_SIGNATURE)
7867 if ((bcmerror
= dhdpcie_bus_download_fw_signature(bus
, &do_wr_flops
))
7871 #endif /* FW_SIGNATURE */
7873 /* switch back to arm core again */
7874 if (!(si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0))) {
7875 DHD_ERROR(("%s: Failed to find ARM CR4 core!\n",
7877 bcmerror
= BCME_ERROR
;
7881 /* write address 0 with reset instruction */
7882 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, 0,
7883 (uint8
*)&bus
->resetinstr
, sizeof(bus
->resetinstr
));
7885 if (bcmerror
== BCME_OK
) {
7888 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, 0,
7889 (uint8
*)&tmp
, sizeof(tmp
));
7891 if (bcmerror
== BCME_OK
&& tmp
!= bus
->resetinstr
) {
7892 DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
7893 __FUNCTION__
, bus
->resetinstr
));
7894 DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
7895 __FUNCTION__
, tmp
));
7896 bcmerror
= BCME_ERROR
;
7900 /* now remove reset and halt and continue to run CR4 */
7904 bus
->arm_oor_time
= OSL_LOCALTIME_NS();
7905 si_core_reset(bus
->sih
, 0, 0);
7907 /* Allow HT Clock now that the ARM is running. */
7908 bus
->alp_only
= FALSE
;
7910 bus
->dhd
->busstate
= DHD_BUS_LOAD
;
7914 /* Always return to PCIE core */
7915 si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
7917 if (MULTIBP_ENAB(bus
->sih
) && !do_flr
) {
7918 dhd_bus_pcie_pwr_req_clear(bus
);
7922 } /* dhdpcie_bus_download_state */
7924 #if defined(FW_SIGNATURE)
7927 dhdpcie_bus_download_fw_signature(dhd_bus_t
*bus
, bool *do_write
)
7929 int bcmerror
= BCME_OK
;
7931 DHD_INFO(("FWSIG: bl=%s,%x fw=%x,%u sig=%s,%x,%u"
7932 " stat=%x,%u ram=%x,%x\n",
7933 bus
->bootloader_filename
, bus
->bootloader_addr
,
7934 bus
->fw_download_addr
, bus
->fw_download_len
,
7935 bus
->fwsig_filename
, bus
->fwsig_download_addr
,
7936 bus
->fwsig_download_len
,
7937 bus
->fwstat_download_addr
, bus
->fwstat_download_len
,
7938 bus
->dongle_ram_base
, bus
->ramtop_addr
));
7940 if (bus
->fwsig_filename
[0] == 0) {
7941 DHD_INFO(("%s: missing signature file\n", __FUNCTION__
));
7945 /* Write RAM Bootloader to TCM if requested */
7946 if ((bcmerror
= dhdpcie_bus_download_ram_bootloader(bus
))
7948 DHD_ERROR(("%s: could not write RAM BL to TCM, err %d\n",
7949 __FUNCTION__
, bcmerror
));
7953 /* Write FW signature rTLV to TCM */
7954 if ((bcmerror
= dhdpcie_bus_write_fwsig(bus
, bus
->fwsig_filename
,
7956 DHD_ERROR(("%s: could not write FWsig to TCM, err %d\n",
7957 __FUNCTION__
, bcmerror
));
7961 /* Write FW signature verification status rTLV to TCM */
7962 if ((bcmerror
= dhdpcie_bus_write_fws_status(bus
)) != BCME_OK
) {
7963 DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
7964 __FUNCTION__
, bcmerror
));
7968 /* Write FW memory map rTLV to TCM */
7969 if ((bcmerror
= dhdpcie_bus_write_fws_mem_info(bus
)) != BCME_OK
) {
7970 DHD_ERROR(("%s: could not write FWinfo to TCM, err %d\n",
7971 __FUNCTION__
, bcmerror
));
7975 /* Write a end-of-TLVs marker to TCM */
7976 if ((bcmerror
= dhdpcie_download_rtlv_end(bus
)) != BCME_OK
) {
7977 DHD_ERROR(("%s: could not write rTLV-end marker to TCM, err %d\n",
7978 __FUNCTION__
, bcmerror
));
7982 /* In case of BL RAM, do write flops */
7983 if (bus
->bootloader_filename
[0] != 0) {
7993 /* Download a reversed-TLV to the top of dongle RAM without overlapping any existing rTLVs */
7995 dhdpcie_download_rtlv(dhd_bus_t
*bus
, dngl_rtlv_type_t type
, dngl_rtlv_len_t len
, uint8
*value
)
7997 int bcmerror
= BCME_OK
;
7999 uint8
*readback_buf
= NULL
;
8000 uint32 readback_val
= 0;
8001 #endif /* DHD_DEBUG */
8002 uint32 dest_addr
= 0; /* dongle RAM dest address */
8003 uint32 dest_size
= 0; /* dongle RAM dest size */
8004 uint32 dest_raw_size
= 0; /* dest size with added checksum */
8006 /* Calculate the destination dongle RAM address and size */
8007 dest_size
= ROUNDUP(len
, 4);
8008 dest_addr
= bus
->ramtop_addr
- sizeof(dngl_rtlv_type_t
) - sizeof(dngl_rtlv_len_t
)
8010 bus
->ramtop_addr
= dest_addr
;
8012 /* Create the rTLV size field. This consists of 2 16-bit fields:
8013 * The lower 16 bits is the size. The higher 16 bits is a checksum
8014 * consisting of the size with all bits reversed.
8015 * +-------------+-------------+
8016 * | checksum | size |
8017 * +-------------+-------------+
8018 * High 16 bits Low 16 bits
8020 dest_raw_size
= (~dest_size
<< 16) | (dest_size
& 0x0000FFFF);
8022 /* Write the value block */
8023 if (dest_size
> 0) {
8024 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, dest_addr
, value
, dest_size
);
8026 DHD_ERROR(("%s: error %d on writing %d membytes to 0x%08x\n",
8027 __FUNCTION__
, bcmerror
, dest_size
, dest_addr
));
8032 /* Write the length word */
8033 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, dest_addr
+ dest_size
,
8034 (uint8
*)&dest_raw_size
, sizeof(dngl_rtlv_len_t
));
8036 /* Write the type word */
8037 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
,
8038 dest_addr
+ dest_size
+ sizeof(dngl_rtlv_len_t
),
8039 (uint8
*)&type
, sizeof(dngl_rtlv_type_t
));
8042 /* Read back and compare the downloaded data */
8043 if (dest_size
> 0) {
8044 readback_buf
= (uint8
*)MALLOC(bus
->dhd
->osh
, dest_size
);
8045 if (!readback_buf
) {
8046 bcmerror
= BCME_NOMEM
;
8049 memset(readback_buf
, 0xaa, dest_size
);
8050 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, dest_addr
, readback_buf
, dest_size
);
8052 DHD_ERROR(("%s: readback error %d, %d bytes from 0x%08x\n",
8053 __FUNCTION__
, bcmerror
, dest_size
, dest_addr
));
8056 if (memcmp(value
, readback_buf
, dest_size
) != 0) {
8057 DHD_ERROR(("%s: Downloaded data mismatch.\n", __FUNCTION__
));
8058 bcmerror
= BCME_ERROR
;
8061 DHD_ERROR(("Download and compare of TLV 0x%x succeeded"
8062 " (size %u, addr %x).\n", type
, dest_size
, dest_addr
));
8066 /* Read back and compare the downloaded len field */
8067 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, dest_addr
+ dest_size
,
8068 (uint8
*)&readback_val
, sizeof(dngl_rtlv_len_t
));
8070 if (readback_val
!= dest_raw_size
) {
8071 bcmerror
= BCME_BADLEN
;
8075 DHD_ERROR(("%s: Downloaded len error %d\n", __FUNCTION__
, bcmerror
));
8079 /* Read back and compare the downloaded type field */
8080 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
,
8081 dest_addr
+ dest_size
+ sizeof(dngl_rtlv_len_t
),
8082 (uint8
*)&readback_val
, sizeof(dngl_rtlv_type_t
));
8084 if (readback_val
!= type
) {
8085 bcmerror
= BCME_BADOPTION
;
8089 DHD_ERROR(("%s: Downloaded type error %d\n", __FUNCTION__
, bcmerror
));
8092 #endif /* DHD_DEBUG */
8094 bus
->ramtop_addr
= dest_addr
;
8099 MFREE(bus
->dhd
->osh
, readback_buf
, dest_size
);
8101 #endif /* DHD_DEBUG */
8104 } /* dhdpcie_download_rtlv */
8106 /* Download a reversed-TLV END marker to the top of dongle RAM */
8108 dhdpcie_download_rtlv_end(dhd_bus_t
*bus
)
8110 return dhdpcie_download_rtlv(bus
, DNGL_RTLV_TYPE_END_MARKER
, 0, NULL
);
8113 /* Write the FW signature verification status to dongle memory */
8115 dhdpcie_bus_write_fws_status(dhd_bus_t
*bus
)
8117 bcm_fwsign_verif_status_t vstatus
;
8120 bzero(&vstatus
, sizeof(vstatus
));
8122 ret
= dhdpcie_download_rtlv(bus
, DNGL_RTLV_TYPE_FWSIGN_STATUS
, sizeof(vstatus
),
8124 bus
->fwstat_download_addr
= bus
->ramtop_addr
;
8125 bus
->fwstat_download_len
= sizeof(vstatus
);
8128 } /* dhdpcie_bus_write_fws_status */
8130 /* Write the FW signature verification memory map to dongle memory */
8132 dhdpcie_bus_write_fws_mem_info(dhd_bus_t
*bus
)
8134 bcm_fwsign_mem_info_t memmap
;
8137 bzero(&memmap
, sizeof(memmap
));
8138 memmap
.firmware
.start
= bus
->fw_download_addr
;
8139 memmap
.firmware
.end
= memmap
.firmware
.start
+ bus
->fw_download_len
;
8140 memmap
.heap
.start
= ROUNDUP(memmap
.firmware
.end
+ BL_HEAP_START_GAP_SIZE
, 4);
8141 memmap
.heap
.end
= memmap
.heap
.start
+ BL_HEAP_SIZE
;
8142 memmap
.signature
.start
= bus
->fwsig_download_addr
;
8143 memmap
.signature
.end
= memmap
.signature
.start
+ bus
->fwsig_download_len
;
8144 memmap
.vstatus
.start
= bus
->fwstat_download_addr
;
8145 memmap
.vstatus
.end
= memmap
.vstatus
.start
+ bus
->fwstat_download_len
;
8146 DHD_INFO(("%s: mem_info: fw=%x-%x heap=%x-%x sig=%x-%x vst=%x-%x res=%x\n",
8148 memmap
.firmware
.start
, memmap
.firmware
.end
,
8149 memmap
.heap
.start
, memmap
.heap
.end
,
8150 memmap
.signature
.start
, memmap
.signature
.end
,
8151 memmap
.vstatus
.start
, memmap
.vstatus
.end
,
8152 memmap
.reset_vec
.start
));
8154 ret
= dhdpcie_download_rtlv(bus
, DNGL_RTLV_TYPE_FWSIGN_MEM_MAP
, sizeof(memmap
),
8156 bus
->fw_memmap_download_addr
= bus
->ramtop_addr
;
8157 bus
->fw_memmap_download_len
= sizeof(memmap
);
8160 } /* dhdpcie_bus_write_fws_mem_info */
8162 /* Download a bootloader image to dongle RAM */
8164 dhdpcie_bus_download_ram_bootloader(dhd_bus_t
*bus
)
8167 uint32 dongle_ram_base_save
;
8169 DHD_INFO(("download_bloader: %s,0x%x. ramtop=0x%x\n",
8170 bus
->bootloader_filename
, bus
->bootloader_addr
, bus
->ramtop_addr
));
8171 if (bus
->bootloader_filename
[0] == '\0') {
8176 dongle_ram_base_save
= bus
->dongle_ram_base
;
8178 /* Set ram base to bootloader download start address */
8179 bus
->dongle_ram_base
= bus
->bootloader_addr
;
8181 /* Download the bootloader image to TCM */
8182 ret
= dhdpcie_download_code_file(bus
, bus
->bootloader_filename
);
8184 /* Restore ram base */
8185 bus
->dongle_ram_base
= dongle_ram_base_save
;
8188 } /* dhdpcie_bus_download_ram_bootloader */
8190 /* Save the FW download address and size */
8192 dhdpcie_bus_save_download_info(dhd_bus_t
*bus
, uint32 download_addr
,
8193 uint32 download_size
, const char *signature_fname
,
8194 const char *bloader_fname
, uint32 bloader_download_addr
)
8196 bus
->fw_download_len
= download_size
;
8197 bus
->fw_download_addr
= download_addr
;
8198 strlcpy(bus
->fwsig_filename
, signature_fname
, sizeof(bus
->fwsig_filename
));
8199 strlcpy(bus
->bootloader_filename
, bloader_fname
, sizeof(bus
->bootloader_filename
));
8200 bus
->bootloader_addr
= bloader_download_addr
;
8202 /* GDB proxy bootloader mode - if signature file specified (i.e.
8203 * bootloader is used), but bootloader is not specified (i.e. ROM
8204 * bootloader is uses).
8205 * Bootloader mode is significant only for for preattachment debugging
8206 * of chips, in which debug cell can't be initialized before ARM CPU
8209 bus
->gdb_proxy_bootloader_mode
=
8210 (bus
->fwsig_filename
[0] != 0) && (bus
->bootloader_filename
[0] == 0);
8211 #endif /* GDB_PROXY */
8213 } /* dhdpcie_bus_save_download_info */
8215 /* Read a small binary file and write it to the specified socram dest address */
8217 dhdpcie_download_sig_file(dhd_bus_t
*bus
, char *path
, uint32 type
)
8219 int bcmerror
= BCME_OK
;
8221 uint8
*srcbuf
= NULL
;
8224 uint32 dest_size
= 0; /* dongle RAM dest size */
8226 if (path
== NULL
|| path
[0] == '\0') {
8227 DHD_ERROR(("%s: no file\n", __FUNCTION__
));
8228 bcmerror
= BCME_NOTFOUND
;
8232 /* Open file, get size */
8233 filep
= dhd_os_open_image1(bus
->dhd
, path
);
8234 if (filep
== NULL
) {
8235 DHD_ERROR(("%s: error opening file %s\n", __FUNCTION__
, path
));
8236 bcmerror
= BCME_NOTFOUND
;
8239 srcsize
= dhd_os_get_image_size(filep
);
8240 if (srcsize
<= 0 || srcsize
> MEMBLOCK
) {
8241 DHD_ERROR(("%s: invalid fwsig size %u\n", __FUNCTION__
, srcsize
));
8242 bcmerror
= BCME_BUFTOOSHORT
;
8245 dest_size
= ROUNDUP(srcsize
, 4);
8247 /* Allocate src buffer, read in the entire file */
8248 srcbuf
= (uint8
*)MALLOCZ(bus
->dhd
->osh
, dest_size
);
8250 bcmerror
= BCME_NOMEM
;
8253 len
= dhd_os_get_image_block(srcbuf
, srcsize
, filep
);
8254 if (len
!= srcsize
) {
8255 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__
, len
));
8256 bcmerror
= BCME_BADLEN
;
8260 /* Write the src buffer as a rTLV to the dongle */
8261 bcmerror
= dhdpcie_download_rtlv(bus
, type
, dest_size
, srcbuf
);
8263 bus
->fwsig_download_addr
= bus
->ramtop_addr
;
8264 bus
->fwsig_download_len
= dest_size
;
8268 dhd_os_close_image1(bus
->dhd
, filep
);
8271 MFREE(bus
->dhd
->osh
, srcbuf
, dest_size
);
8275 } /* dhdpcie_download_sig_file */
8278 dhdpcie_bus_write_fwsig(dhd_bus_t
*bus
, char *fwsig_path
, char *nvsig_path
)
8280 int bcmerror
= BCME_OK
;
8282 /* Download the FW signature file to the chip */
8283 bcmerror
= dhdpcie_download_sig_file(bus
, fwsig_path
, DNGL_RTLV_TYPE_FW_SIGNATURE
);
8290 DHD_ERROR(("%s: error %d\n", __FUNCTION__
, bcmerror
));
8293 } /* dhdpcie_bus_write_fwsig */
8295 /* Dump secure firmware status. */
8297 dhd_bus_dump_fws(dhd_bus_t
*bus
, struct bcmstrbuf
*strbuf
)
8299 bcm_fwsign_verif_status_t status
;
8300 bcm_fwsign_mem_info_t meminfo
;
8303 bzero(&status
, sizeof(status
));
8304 if (bus
->fwstat_download_addr
!= 0) {
8305 err
= dhdpcie_bus_membytes(bus
, FALSE
, bus
->fwstat_download_addr
,
8306 (uint8
*)&status
, sizeof(status
));
8307 if (err
!= BCME_OK
) {
8308 DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
8309 __FUNCTION__
, err
, sizeof(status
), bus
->fwstat_download_addr
));
8314 bzero(&meminfo
, sizeof(meminfo
));
8315 if (bus
->fw_memmap_download_addr
!= 0) {
8316 err
= dhdpcie_bus_membytes(bus
, FALSE
, bus
->fw_memmap_download_addr
,
8317 (uint8
*)&meminfo
, sizeof(meminfo
));
8318 if (err
!= BCME_OK
) {
8319 DHD_ERROR(("%s: error %d on reading %zu membytes at 0x%08x\n",
8320 __FUNCTION__
, err
, sizeof(meminfo
), bus
->fw_memmap_download_addr
));
8325 bcm_bprintf(strbuf
, "Firmware signing\nSignature: (%08x) len (%d)\n",
8326 bus
->fwsig_download_addr
, bus
->fwsig_download_len
);
8329 "Verification status: (%08x)\n"
8332 "\talloc_bytes: %u\n"
8333 "\tmax_alloc_bytes: %u\n"
8334 "\ttotal_alloc_bytes: %u\n"
8335 "\ttotal_freed_bytes: %u\n"
8336 "\tnum_allocs: %u\n"
8337 "\tmax_allocs: %u\n"
8338 "\tmax_alloc_size: %u\n"
8339 "\talloc_failures: %u\n",
8340 bus
->fwstat_download_addr
,
8344 status
.max_alloc_bytes
,
8345 status
.total_alloc_bytes
,
8346 status
.total_freed_bytes
,
8349 status
.max_alloc_size
,
8350 status
.alloc_failures
);
8353 "Memory info: (%08x)\n"
8354 "\tfw %08x-%08x\n\theap %08x-%08x\n\tsig %08x-%08x\n\tvst %08x-%08x\n",
8355 bus
->fw_memmap_download_addr
,
8356 meminfo
.firmware
.start
, meminfo
.firmware
.end
,
8357 meminfo
.heap
.start
, meminfo
.heap
.end
,
8358 meminfo
.signature
.start
, meminfo
.signature
.end
,
8359 meminfo
.vstatus
.start
, meminfo
.vstatus
.end
);
8363 #endif /* FW_SIGNATURE */
8365 /* Write nvram data to the top of dongle RAM, ending with a size in # of 32-bit words */
8367 dhdpcie_bus_write_vars(dhd_bus_t
*bus
)
8370 uint32 varsize
, phys_size
;
8375 uint8
*nvram_ularray
;
8376 #endif /* DHD_DEBUG */
8378 /* Even if there are no vars are to be written, we still need to set the ramsize. */
8379 varsize
= bus
->varsz
? ROUNDUP(bus
->varsz
, 4) : 0;
8380 varaddr
= (bus
->ramsize
- 4) - varsize
;
8382 varaddr
+= bus
->dongle_ram_base
;
8383 bus
->ramtop_addr
= varaddr
;
8387 /* XXX In case the controller has trouble with odd bytes... */
8388 vbuffer
= (uint8
*)MALLOC(bus
->dhd
->osh
, varsize
);
8392 bzero(vbuffer
, varsize
);
8393 bcopy(bus
->vars
, vbuffer
, bus
->varsz
);
8394 /* Write the vars list */
8395 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, varaddr
, vbuffer
, varsize
);
8397 /* Implement read back and verify later */
8399 /* Verify NVRAM bytes */
8400 DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__
, varsize
));
8401 nvram_ularray
= (uint8
*)MALLOC(bus
->dhd
->osh
, varsize
);
8402 if (!nvram_ularray
) {
8403 MFREE(bus
->dhd
->osh
, vbuffer
, varsize
);
8407 /* Upload image to verify downloaded contents. */
8408 memset(nvram_ularray
, 0xaa, varsize
);
8410 /* Read the vars list to temp buffer for comparison */
8411 bcmerror
= dhdpcie_bus_membytes(bus
, FALSE
, varaddr
, nvram_ularray
, varsize
);
8413 DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
8414 __FUNCTION__
, bcmerror
, varsize
, varaddr
));
8417 /* Compare the org NVRAM with the one read from RAM */
8418 if (memcmp(vbuffer
, nvram_ularray
, varsize
)) {
8419 DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__
));
8420 prhex("nvram file", vbuffer
, varsize
);
8421 prhex("downloaded nvram", nvram_ularray
, varsize
);
8422 MFREE(bus
->dhd
->osh
, nvram_ularray
, varsize
);
8423 MFREE(bus
->dhd
->osh
, vbuffer
, varsize
);
8426 DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
8429 MFREE(bus
->dhd
->osh
, nvram_ularray
, varsize
);
8430 #endif /* DHD_DEBUG */
8432 MFREE(bus
->dhd
->osh
, vbuffer
, varsize
);
8435 phys_size
= REMAP_ENAB(bus
) ? bus
->ramsize
: bus
->orig_ramsize
;
8437 phys_size
+= bus
->dongle_ram_base
;
8439 /* adjust to the user specified RAM */
8440 DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__
,
8441 phys_size
, bus
->ramsize
));
8442 DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__
,
8444 varsize
= ((phys_size
- 4) - varaddr
);
8447 * Determine the length token:
8448 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
8452 bus
->nvram_csm
= varsizew
;
8454 varsizew
= varsize
/ 4;
8455 varsizew
= (~varsizew
<< 16) | (varsizew
& 0x0000FFFF);
8456 bus
->nvram_csm
= varsizew
;
8457 varsizew
= htol32(varsizew
);
8460 DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__
, varsize
, varsizew
));
8462 /* Write the length token to the last word */
8463 bcmerror
= dhdpcie_bus_membytes(bus
, TRUE
, (phys_size
- 4),
8464 (uint8
*)&varsizew
, 4);
8467 } /* dhdpcie_bus_write_vars */
8470 dhdpcie_downloadvars(dhd_bus_t
*bus
, void *arg
, int len
)
8472 int bcmerror
= BCME_OK
;
8473 #ifdef KEEP_JP_REGREV
8474 /* XXX Needed by customer's request */
8477 #endif /* KEEP_JP_REGREV */
8479 const char nodeadman_record
[] = "deadman_to=0";
8480 #endif /* GDB_PROXY */
8482 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
8485 bcmerror
= BCME_BUFTOOSHORT
;
8489 /* Free the old ones and replace with passed variables */
8491 MFREE(bus
->dhd
->osh
, bus
->vars
, bus
->varsz
);
8493 if (bus
->dhd
->gdb_proxy_nodeadman
) {
8494 len
+= sizeof(nodeadman_record
);
8496 #endif /* GDB_PROXY */
8498 bus
->vars
= MALLOC(bus
->dhd
->osh
, len
);
8499 bus
->varsz
= bus
->vars
? len
: 0;
8500 if (bus
->vars
== NULL
) {
8501 bcmerror
= BCME_NOMEM
;
8505 /* Copy the passed variables, which should include the terminating double-null */
8506 bcopy(arg
, bus
->vars
, bus
->varsz
);
8508 if (bus
->dhd
->gdb_proxy_nodeadman
&&
8509 !replace_nvram_variable(bus
->vars
, bus
->varsz
, nodeadman_record
, NULL
))
8511 bcmerror
= BCME_NOMEM
;
8514 #endif /* GDB_PROXY */
8516 /* Re-Calculate htclkratio only for QT, for FPGA it is fixed at 30 */
8518 #ifdef DHD_USE_SINGLE_NVRAM_FILE
8519 /* XXX Change the default country code only for MFG firmware */
8520 if (dhd_bus_get_fw_mode(bus
->dhd
) == DHD_FLAG_MFG_MODE
) {
8524 char tag
[2][8] = {"ccode=", "regrev="};
8526 /* Find ccode and regrev info */
8527 for (i
= 0; i
< 2; i
++) {
8528 sp
= strnstr(bus
->vars
, tag
[i
], bus
->varsz
);
8530 DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
8531 __FUNCTION__
, bus
->nv_path
));
8532 bcmerror
= BCME_ERROR
;
8535 sp
= strchr(sp
, '=');
8536 ep
= strchr(sp
, '\0');
8537 /* We assumed that string length of both ccode and
8538 * regrev values should not exceed WLC_CNTRY_BUF_SZ
8540 if (ep
&& ((ep
- sp
) <= WLC_CNTRY_BUF_SZ
)) {
8542 while (*sp
!= '\0') {
8543 DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
8544 __FUNCTION__
, tag
[i
], *sp
));
8548 DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
8549 __FUNCTION__
, tag
[i
]));
8550 bcmerror
= BCME_ERROR
;
8555 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
8557 #ifdef KEEP_JP_REGREV
8558 /* XXX Needed by customer's request */
8559 #ifdef DHD_USE_SINGLE_NVRAM_FILE
8560 if (dhd_bus_get_fw_mode(bus
->dhd
) != DHD_FLAG_MFG_MODE
)
8561 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
8564 tmpbuf
= MALLOCZ(bus
->dhd
->osh
, bus
->varsz
+ 1);
8565 if (tmpbuf
== NULL
) {
8568 memcpy(tmpbuf
, bus
->vars
, bus
->varsz
);
8569 for (tmpidx
= 0; tmpidx
< bus
->varsz
; tmpidx
++) {
8570 if (tmpbuf
[tmpidx
] == 0) {
8571 tmpbuf
[tmpidx
] = '\n';
8574 bus
->dhd
->vars_ccode
[0] = 0;
8575 bus
->dhd
->vars_regrev
= 0;
8576 if ((pos
= strstr(tmpbuf
, "ccode"))) {
8577 sscanf(pos
, "ccode=%3s\n", bus
->dhd
->vars_ccode
);
8579 if ((pos
= strstr(tmpbuf
, "regrev"))) {
8580 sscanf(pos
, "regrev=%u\n", &(bus
->dhd
->vars_regrev
));
8582 MFREE(bus
->dhd
->osh
, tmpbuf
, bus
->varsz
+ 1);
8584 #endif /* KEEP_JP_REGREV */
8590 /* loop through the capability list and see if the pcie capabilty exists */
8592 dhdpcie_find_pci_capability(osl_t
*osh
, uint8 req_cap_id
)
8598 /* check for Header type 0 */
8599 byte_val
= read_pci_cfg_byte(PCI_CFG_HDR
);
8600 if ((byte_val
& 0x7f) != PCI_HEADER_NORMAL
) {
8601 DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__
));
8605 /* check if the capability pointer field exists */
8606 byte_val
= read_pci_cfg_byte(PCI_CFG_STAT
);
8607 if (!(byte_val
& PCI_CAPPTR_PRESENT
)) {
8608 DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__
));
8612 cap_ptr
= read_pci_cfg_byte(PCI_CFG_CAPPTR
);
8613 /* check if the capability pointer is 0x00 */
8614 if (cap_ptr
== 0x00) {
8615 DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__
));
8619 /* loop thr'u the capability list and see if the pcie capabilty exists */
8621 cap_id
= read_pci_cfg_byte(cap_ptr
);
8623 while (cap_id
!= req_cap_id
) {
8624 cap_ptr
= read_pci_cfg_byte((cap_ptr
+ 1));
8625 if (cap_ptr
== 0x00) break;
8626 cap_id
= read_pci_cfg_byte(cap_ptr
);
8634 dhdpcie_pme_active(osl_t
*osh
, bool enable
)
8639 cap_ptr
= dhdpcie_find_pci_capability(osh
, PCI_CAP_POWERMGMTCAP_ID
);
8642 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__
));
8646 pme_csr
= OSL_PCI_READ_CONFIG(osh
, cap_ptr
+ PME_CSR_OFFSET
, sizeof(uint32
));
8647 DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__
, pme_csr
));
8649 pme_csr
|= PME_CSR_PME_STAT
;
8651 pme_csr
|= PME_CSR_PME_EN
;
8653 pme_csr
&= ~PME_CSR_PME_EN
;
8656 OSL_PCI_WRITE_CONFIG(osh
, cap_ptr
+ PME_CSR_OFFSET
, sizeof(uint32
), pme_csr
);
8660 dhdpcie_pme_cap(osl_t
*osh
)
8665 cap_ptr
= dhdpcie_find_pci_capability(osh
, PCI_CAP_POWERMGMTCAP_ID
);
8668 DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__
));
8672 pme_cap
= OSL_PCI_READ_CONFIG(osh
, cap_ptr
, sizeof(uint32
));
8674 DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__
, pme_cap
));
8676 return ((pme_cap
& PME_CAP_PM_STATES
) != 0);
8680 dhdpcie_pme_stat_clear(dhd_bus_t
*bus
)
8682 uint32 pmcsr
= dhd_pcie_config_read(bus
, PCIE_CFG_PMCSR
, sizeof(uint32
));
8684 OSL_PCI_WRITE_CONFIG(bus
->osh
, PCIE_CFG_PMCSR
, sizeof(uint32
), pmcsr
| PCIE_PMCSR_PMESTAT
);
8688 dhdpcie_lcreg(osl_t
*osh
, uint32 mask
, uint32 val
)
8692 uint8 lcreg_offset
; /* PCIE capability LCreg offset in the config space */
8695 pcie_cap
= dhdpcie_find_pci_capability(osh
, PCI_CAP_PCIECAP_ID
);
8698 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__
));
8702 lcreg_offset
= pcie_cap
+ PCIE_CAP_LINKCTRL_OFFSET
;
8707 reg_val
= OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
8711 reg_val
|= (mask
& val
);
8714 OSL_PCI_WRITE_CONFIG(osh
, lcreg_offset
, sizeof(uint32
), reg_val
);
8716 return OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
8720 dhdpcie_clkreq(osl_t
*osh
, uint32 mask
, uint32 val
)
8724 uint8 lcreg_offset
; /* PCIE capability LCreg offset in the config space */
8726 pcie_cap
= dhdpcie_find_pci_capability(osh
, PCI_CAP_PCIECAP_ID
);
8729 DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__
));
8733 lcreg_offset
= pcie_cap
+ PCIE_CAP_LINKCTRL_OFFSET
;
8735 reg_val
= OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
8739 reg_val
|= PCIE_CLKREQ_ENAB
;
8741 reg_val
&= ~PCIE_CLKREQ_ENAB
;
8742 OSL_PCI_WRITE_CONFIG(osh
, lcreg_offset
, sizeof(uint32
), reg_val
);
8743 reg_val
= OSL_PCI_READ_CONFIG(osh
, lcreg_offset
, sizeof(uint32
));
8745 if (reg_val
& PCIE_CLKREQ_ENAB
)
8751 void dhd_dump_intr_counters(dhd_pub_t
*dhd
, struct bcmstrbuf
*strbuf
)
8754 uint64 current_time
= OSL_LOCALTIME_NS();
8757 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__
));
8763 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__
));
8767 bcm_bprintf(strbuf
, "\n ------- DUMPING INTR enable/disable counters-------\n");
8768 bcm_bprintf(strbuf
, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
8769 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
8770 "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
8771 bus
->resume_intr_enable_count
, bus
->dpc_intr_enable_count
,
8772 bus
->isr_intr_disable_count
, bus
->suspend_intr_disable_count
,
8773 bus
->dpc_return_busdown_count
, bus
->non_ours_irq_count
);
8774 #ifdef BCMPCIE_OOB_HOST_WAKE
8775 bcm_bprintf(strbuf
, "oob_intr_count=%lu oob_intr_enable_count=%lu"
8776 " oob_intr_disable_count=%lu\noob_irq_num=%d"
8777 " last_oob_irq_times="SEC_USEC_FMT
":"SEC_USEC_FMT
8778 " last_oob_irq_enable_time="SEC_USEC_FMT
"\nlast_oob_irq_disable_time="SEC_USEC_FMT
8779 " oob_irq_enabled=%d oob_gpio_level=%d\n",
8780 bus
->oob_intr_count
, bus
->oob_intr_enable_count
,
8781 bus
->oob_intr_disable_count
, dhdpcie_get_oob_irq_num(bus
),
8782 GET_SEC_USEC(bus
->last_oob_irq_isr_time
),
8783 GET_SEC_USEC(bus
->last_oob_irq_thr_time
),
8784 GET_SEC_USEC(bus
->last_oob_irq_enable_time
),
8785 GET_SEC_USEC(bus
->last_oob_irq_disable_time
), dhdpcie_get_oob_irq_status(bus
),
8786 dhdpcie_get_oob_irq_level());
8787 #endif /* BCMPCIE_OOB_HOST_WAKE */
8788 bcm_bprintf(strbuf
, "\ncurrent_time="SEC_USEC_FMT
" isr_entry_time="SEC_USEC_FMT
8789 " isr_exit_time="SEC_USEC_FMT
"\n"
8790 "isr_sched_dpc_time="SEC_USEC_FMT
" rpm_sched_dpc_time="SEC_USEC_FMT
"\n"
8791 " last_non_ours_irq_time="SEC_USEC_FMT
" dpc_entry_time="SEC_USEC_FMT
"\n"
8792 "last_process_ctrlbuf_time="SEC_USEC_FMT
" last_process_flowring_time="SEC_USEC_FMT
8793 " last_process_txcpl_time="SEC_USEC_FMT
"\nlast_process_rxcpl_time="SEC_USEC_FMT
8794 " last_process_infocpl_time="SEC_USEC_FMT
" last_process_edl_time="SEC_USEC_FMT
8795 "\ndpc_exit_time="SEC_USEC_FMT
" resched_dpc_time="SEC_USEC_FMT
"\n"
8796 "last_d3_inform_time="SEC_USEC_FMT
"\n",
8797 GET_SEC_USEC(current_time
), GET_SEC_USEC(bus
->isr_entry_time
),
8798 GET_SEC_USEC(bus
->isr_exit_time
), GET_SEC_USEC(bus
->isr_sched_dpc_time
),
8799 GET_SEC_USEC(bus
->rpm_sched_dpc_time
),
8800 GET_SEC_USEC(bus
->last_non_ours_irq_time
), GET_SEC_USEC(bus
->dpc_entry_time
),
8801 GET_SEC_USEC(bus
->last_process_ctrlbuf_time
),
8802 GET_SEC_USEC(bus
->last_process_flowring_time
),
8803 GET_SEC_USEC(bus
->last_process_txcpl_time
),
8804 GET_SEC_USEC(bus
->last_process_rxcpl_time
),
8805 GET_SEC_USEC(bus
->last_process_infocpl_time
),
8806 GET_SEC_USEC(bus
->last_process_edl_time
),
8807 GET_SEC_USEC(bus
->dpc_exit_time
), GET_SEC_USEC(bus
->resched_dpc_time
),
8808 GET_SEC_USEC(bus
->last_d3_inform_time
));
8810 bcm_bprintf(strbuf
, "\nlast_suspend_start_time="SEC_USEC_FMT
" last_suspend_end_time="
8811 SEC_USEC_FMT
" last_resume_start_time="SEC_USEC_FMT
" last_resume_end_time="
8812 SEC_USEC_FMT
"\n", GET_SEC_USEC(bus
->last_suspend_start_time
),
8813 GET_SEC_USEC(bus
->last_suspend_end_time
),
8814 GET_SEC_USEC(bus
->last_resume_start_time
),
8815 GET_SEC_USEC(bus
->last_resume_end_time
));
8817 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
8818 bcm_bprintf(strbuf
, "logtrace_thread_entry_time="SEC_USEC_FMT
8819 " logtrace_thread_sem_down_time="SEC_USEC_FMT
8820 "\nlogtrace_thread_flush_time="SEC_USEC_FMT
8821 " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
8822 "\nlogtrace_thread_complete_time="SEC_USEC_FMT
"\n",
8823 GET_SEC_USEC(dhd
->logtrace_thr_ts
.entry_time
),
8824 GET_SEC_USEC(dhd
->logtrace_thr_ts
.sem_down_time
),
8825 GET_SEC_USEC(dhd
->logtrace_thr_ts
.flush_time
),
8826 GET_SEC_USEC(dhd
->logtrace_thr_ts
.unexpected_break_time
),
8827 GET_SEC_USEC(dhd
->logtrace_thr_ts
.complete_time
));
8828 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
8831 void dhd_dump_intr_registers(dhd_pub_t
*dhd
, struct bcmstrbuf
*strbuf
)
8833 uint32 intstatus
= 0;
8836 uint32 d2h_mb_data
= 0;
8838 intstatus
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
8839 dhd
->bus
->pcie_mailbox_int
, 0, 0);
8840 intmask
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
8841 dhd
->bus
->pcie_mailbox_mask
, 0, 0);
8842 d2h_db0
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, PCID2H_MailBox
, 0, 0);
8843 dhd_bus_cmn_readshared(dhd
->bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
8845 bcm_bprintf(strbuf
, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
8846 intstatus
, intmask
, d2h_db0
);
8847 bcm_bprintf(strbuf
, "d2h_mb_data=0x%x def_intmask=0x%x\n",
8848 d2h_mb_data
, dhd
->bus
->def_intmask
);
8850 /** Add bus dump output to a buffer */
8851 void dhd_bus_dump(dhd_pub_t
*dhdp
, struct bcmstrbuf
*strbuf
)
8855 flow_ring_node_t
*flow_ring_node
;
8856 flow_info_t
*flow_info
;
8857 #ifdef TX_STATUS_LATENCY_STATS
8859 if_flow_lkup_t
*if_flow_lkup
;
8860 dhd_if_tx_status_latency_t if_tx_status_latency
[DHD_MAX_IFS
];
8861 #endif /* TX_STATUS_LATENCY_STATS */
8863 #if defined(FW_SIGNATURE)
8864 /* Dump secure firmware status. */
8865 if (dhdp
->busstate
<= DHD_BUS_LOAD
) {
8866 dhd_bus_dump_fws(dhdp
->bus
, strbuf
);
8870 if (dhdp
->busstate
!= DHD_BUS_DATA
)
8873 #ifdef TX_STATUS_LATENCY_STATS
8874 memset(if_tx_status_latency
, 0, sizeof(if_tx_status_latency
));
8875 #endif /* TX_STATUS_LATENCY_STATS */
8876 #ifdef DHD_WAKE_STATUS
8877 bcm_bprintf(strbuf
, "wake %u rxwake %u readctrlwake %u\n",
8878 bcmpcie_get_total_wake(dhdp
->bus
), dhdp
->bus
->wake_counts
.rxwake
,
8879 dhdp
->bus
->wake_counts
.rcwake
);
8880 #ifdef DHD_WAKE_RX_STATUS
8881 bcm_bprintf(strbuf
, " unicast %u muticast %u broadcast %u arp %u\n",
8882 dhdp
->bus
->wake_counts
.rx_ucast
, dhdp
->bus
->wake_counts
.rx_mcast
,
8883 dhdp
->bus
->wake_counts
.rx_bcast
, dhdp
->bus
->wake_counts
.rx_arp
);
8884 bcm_bprintf(strbuf
, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
8885 dhdp
->bus
->wake_counts
.rx_multi_ipv4
, dhdp
->bus
->wake_counts
.rx_multi_ipv6
,
8886 dhdp
->bus
->wake_counts
.rx_icmpv6
, dhdp
->bus
->wake_counts
.rx_multi_other
);
8887 bcm_bprintf(strbuf
, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
8888 dhdp
->bus
->wake_counts
.rx_icmpv6_ra
, dhdp
->bus
->wake_counts
.rx_icmpv6_na
,
8889 dhdp
->bus
->wake_counts
.rx_icmpv6_ns
);
8890 #endif /* DHD_WAKE_RX_STATUS */
8891 #ifdef DHD_WAKE_EVENT_STATUS
8892 for (flowid
= 0; flowid
< WLC_E_LAST
; flowid
++)
8893 if (dhdp
->bus
->wake_counts
.rc_event
[flowid
] != 0)
8894 bcm_bprintf(strbuf
, " %s = %u\n", bcmevent_get_name(flowid
),
8895 dhdp
->bus
->wake_counts
.rc_event
[flowid
]);
8896 bcm_bprintf(strbuf
, "\n");
8897 #endif /* DHD_WAKE_EVENT_STATUS */
8898 #endif /* DHD_WAKE_STATUS */
8900 dhd_prot_print_info(dhdp
, strbuf
);
8901 dhd_dump_intr_registers(dhdp
, strbuf
);
8902 dhd_dump_intr_counters(dhdp
, strbuf
);
8903 bcm_bprintf(strbuf
, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
8904 dhdp
->bus
->h2d_mb_data_ptr_addr
, dhdp
->bus
->d2h_mb_data_ptr_addr
);
8905 bcm_bprintf(strbuf
, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp
->cumm_ctr
));
8906 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
8907 bcm_bprintf(strbuf
, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
8908 dhdp
->multi_client_flow_rings
, dhdp
->max_multi_client_flow_rings
);
8909 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
8910 #if defined(DHD_HTPUT_TUNABLES)
8911 bcm_bprintf(strbuf
, "htput_flow_ring_start:%d total_htput:%d client_htput=%d\n",
8912 dhdp
->htput_flow_ring_start
, HTPUT_TOTAL_FLOW_RINGS
, dhdp
->htput_client_flow_rings
);
8913 #endif /* DHD_HTPUT_TUNABLES */
8915 "%4s %4s %2s %4s %17s %4s %4s %6s %10s %17s %17s %17s %17s %14s %14s %10s ",
8916 "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
8917 " Overflows", "TRD: HLRD: HDRD", "TWR: HLWR: HDWR", "BASE(VA)", "BASE(PA)",
8918 "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
8920 #ifdef TX_STATUS_LATENCY_STATS
8921 /* Average Tx status/Completion Latency in micro secs */
8922 bcm_bprintf(strbuf
, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us");
8923 #endif /* TX_STATUS_LATENCY_STATS */
8925 bcm_bprintf(strbuf
, "\n");
8927 for (flowid
= 0; flowid
< dhdp
->num_h2d_rings
; flowid
++) {
8928 flow_ring_node
= DHD_FLOW_RING(dhdp
, flowid
);
8929 if (!flow_ring_node
->active
)
8932 flow_info
= &flow_ring_node
->flow_info
;
8934 "%4d %4d %2d %4d "MACDBG
" %4d %4d %6d %10u ", ix
++,
8935 flow_ring_node
->flowid
, flow_info
->ifindex
, flow_info
->tid
,
8936 MAC2STRDBG(flow_info
->da
),
8937 DHD_FLOW_QUEUE_LEN(&flow_ring_node
->queue
),
8938 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node
->queue
)),
8939 DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node
->queue
)),
8940 DHD_FLOW_QUEUE_FAILURES(&flow_ring_node
->queue
));
8941 dhd_prot_print_flow_ring(dhdp
, flow_ring_node
->prot_info
, TRUE
, strbuf
,
8942 "%5d:%5d:%5d %5d:%5d:%5d %17p %8x:%8x %14d %14d %10d");
8944 #ifdef TX_STATUS_LATENCY_STATS
8945 bcm_bprintf(strbuf
, "%16llu %16llu ",
8946 flow_info
->num_tx_pkts
,
8947 flow_info
->num_tx_status
?
8948 DIV_U64_BY_U64(flow_info
->cum_tx_status_latency
,
8949 flow_info
->num_tx_status
) : 0);
8950 ifindex
= flow_info
->ifindex
;
8951 ASSERT(ifindex
< DHD_MAX_IFS
);
8952 if (ifindex
< DHD_MAX_IFS
) {
8953 if_tx_status_latency
[ifindex
].num_tx_status
+= flow_info
->num_tx_status
;
8954 if_tx_status_latency
[ifindex
].cum_tx_status_latency
+=
8955 flow_info
->cum_tx_status_latency
;
8957 DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
8958 __FUNCTION__
, ifindex
, flowid
));
8960 #endif /* TX_STATUS_LATENCY_STATS */
8961 bcm_bprintf(strbuf
, "\n");
8964 #ifdef TX_STATUS_LATENCY_STATS
8965 bcm_bprintf(strbuf
, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
8966 if_flow_lkup
= (if_flow_lkup_t
*)dhdp
->if_flow_lkup
;
8967 for (ix
= 0; ix
< DHD_MAX_IFS
; ix
++) {
8968 if (!if_flow_lkup
[ix
].status
) {
8971 bcm_bprintf(strbuf
, "%2d %16llu %16llu\n",
8973 if_tx_status_latency
[ix
].num_tx_status
?
8974 DIV_U64_BY_U64(if_tx_status_latency
[ix
].cum_tx_status_latency
,
8975 if_tx_status_latency
[ix
].num_tx_status
): 0,
8976 if_tx_status_latency
[ix
].num_tx_status
);
8978 #endif /* TX_STATUS_LATENCY_STATS */
8980 bcm_bprintf(strbuf
, "D3 inform cnt %d\n", dhdp
->bus
->d3_inform_cnt
);
8981 bcm_bprintf(strbuf
, "D0 inform cnt %d\n", dhdp
->bus
->d0_inform_cnt
);
8982 bcm_bprintf(strbuf
, "D0 inform in use cnt %d\n", dhdp
->bus
->d0_inform_in_use_cnt
);
8983 if (dhdp
->d2h_hostrdy_supported
) {
8984 bcm_bprintf(strbuf
, "hostready count:%d\n", dhdp
->bus
->hostready_count
);
8987 /* Inband device wake counters */
8988 if (INBAND_DW_ENAB(dhdp
->bus
)) {
8989 bcm_bprintf(strbuf
, "Inband device_wake assert count: %d\n",
8990 dhdp
->bus
->inband_dw_assert_cnt
);
8991 bcm_bprintf(strbuf
, "Inband device_wake deassert count: %d\n",
8992 dhdp
->bus
->inband_dw_deassert_cnt
);
8993 bcm_bprintf(strbuf
, "Inband DS-EXIT <host initiated> count: %d\n",
8994 dhdp
->bus
->inband_ds_exit_host_cnt
);
8995 bcm_bprintf(strbuf
, "Inband DS-EXIT <device initiated> count: %d\n",
8996 dhdp
->bus
->inband_ds_exit_device_cnt
);
8997 bcm_bprintf(strbuf
, "Inband DS-EXIT Timeout count: %d\n",
8998 dhdp
->bus
->inband_ds_exit_to_cnt
);
8999 bcm_bprintf(strbuf
, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
9000 dhdp
->bus
->inband_host_sleep_exit_to_cnt
);
9002 #endif /* PCIE_INB_DW */
9003 bcm_bprintf(strbuf
, "d2h_intr_method -> %s\n",
9004 dhdp
->bus
->d2h_intr_method
? "PCIE_MSI" : "PCIE_INTX");
9006 bcm_bprintf(strbuf
, "\n\nDB7 stats - db7_send_cnt: %d, db7_trap_cnt: %d, "
9007 "max duration: %lld (%lld - %lld), db7_timing_error_cnt: %d\n",
9008 dhdp
->db7_trap
.debug_db7_send_cnt
,
9009 dhdp
->db7_trap
.debug_db7_trap_cnt
,
9010 dhdp
->db7_trap
.debug_max_db7_dur
,
9011 dhdp
->db7_trap
.debug_max_db7_trap_time
,
9012 dhdp
->db7_trap
.debug_max_db7_send_time
,
9013 dhdp
->db7_trap
.debug_db7_timing_error_cnt
);
9016 #ifdef DNGL_AXI_ERROR_LOGGING
/*
 * dhd_axi_sig_match: validates the AXI error record the dongle left in TCM.
 * Reads the record's TCM address from axierror_logbuf_addr, checks that it
 * lies inside dongle RAM, and compares the record's signature field against
 * HND_EXT_TRAP_AXIERROR_SIGNATURE.
 * NOTE(review): the return statements are not visible in this chunk;
 * presumably returns TRUE only on a signature match — confirm against the
 * full source.
 */
9018 dhd_axi_sig_match(dhd_pub_t
*dhdp
)
/* First word of the log buffer is the TCM address of the AXI error record. */
9020 uint32 axi_tcm_addr
= dhdpcie_bus_rtcm32(dhdp
->bus
, dhdp
->axierror_logbuf_addr
);
/* Test hook: DHD_INDUCE_DROP_AXI_SIG forces the "no signature" path. */
9022 if (dhdp
->dhd_induce_error
== DHD_INDUCE_DROP_AXI_SIG
) {
9023 DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__
));
9027 DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
9028 __FUNCTION__
, axi_tcm_addr
, dhdp
->bus
->dongle_ram_base
,
9029 dhdp
->bus
->dongle_ram_base
+ dhdp
->bus
->ramsize
));
/* Only trust the record if its address falls within dongle RAM bounds. */
9030 if (axi_tcm_addr
>= dhdp
->bus
->dongle_ram_base
&&
9031 axi_tcm_addr
< dhdp
->bus
->dongle_ram_base
+ dhdp
->bus
->ramsize
) {
/* Read the signature field of the hnd_ext_trap_axi_error_v1_t record. */
9032 uint32 axi_signature
= dhdpcie_bus_rtcm32(dhdp
->bus
, (axi_tcm_addr
+
9033 OFFSETOF(hnd_ext_trap_axi_error_v1_t
, signature
)));
9034 if (axi_signature
== HND_EXT_TRAP_AXIERROR_SIGNATURE
) {
9037 DHD_ERROR(("%s: No AXI signature: 0x%x\n",
9038 __FUNCTION__
, axi_signature
));
9042 DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__
));
/*
 * dhd_axi_error: collects the AXI error record from dongle TCM after an
 * SMMU fault, logs it to dmesg via dhd_log_dump_axi_error(), copies it into
 * dhdp->axi_err_dump->etd_axi_error_v1, and schedules the deferred AXI
 * error dump work (dhd_schedule_axi_error_dump). Bails out early when the
 * log address, the dump buffer, or the AXI signature is absent.
 */
9048 dhd_axi_error(dhd_pub_t
*dhdp
)
9050 dhd_axi_error_dump_t
*axi_err_dump
;
9051 uint8
*axi_err_buf
= NULL
;
9052 uint8
*p_axi_err
= NULL
;
9053 uint32 axi_logbuf_addr
;
9054 uint32 axi_tcm_addr
;
9057 /* XXX: On the Dongle side, if an invalid Host Address is generated for a transaction
9058 * it results in SMMU Fault. Now the Host won't respond for the invalid transaction.
9059 * On the Dongle side, after 50msec this results in AXI Slave Error.
9060 * Hence introduce a delay higher than 50msec to ensure AXI Slave error happens and
9061 * the Dongle collects the required information.
9065 axi_logbuf_addr
= dhdp
->axierror_logbuf_addr
;
/* No log buffer address published by firmware: nothing to collect. */
9066 if (!axi_logbuf_addr
) {
9067 DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__
));
9071 axi_err_dump
= dhdp
->axi_err_dump
;
9072 if (!axi_err_dump
) {
/* Abort unless the record in TCM carries a valid AXI signature. */
9076 if (!dhd_axi_sig_match(dhdp
)) {
9080 /* Reading AXI error data for SMMU fault */
9081 DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__
));
9082 axi_tcm_addr
= dhdpcie_bus_rtcm32(dhdp
->bus
, axi_logbuf_addr
);
9083 size
= sizeof(hnd_ext_trap_axi_error_v1_t
);
9084 axi_err_buf
= MALLOCZ(dhdp
->osh
, size
);
9085 if (axi_err_buf
== NULL
) {
9086 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__
));
9090 p_axi_err
= axi_err_buf
;
/* Bulk-read the whole v1 record out of dongle TCM. */
9091 err
= dhdpcie_bus_membytes(dhdp
->bus
, FALSE
, axi_tcm_addr
, p_axi_err
, size
);
9093 DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
9094 __FUNCTION__
, err
, size
, axi_tcm_addr
));
9098 /* Dump data to Dmesg */
9099 dhd_log_dump_axi_error(axi_err_buf
);
/* Preserve a copy in the ETD dump structure for later retrieval. */
9100 err
= memcpy_s(&axi_err_dump
->etd_axi_error_v1
, size
, axi_err_buf
, size
);
9102 DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
9103 __FUNCTION__
, err
));
9108 MFREE(dhdp
->osh
, axi_err_buf
, size
);
/* Hand off to the workqueue to produce the user-visible dump file. */
9110 dhd_schedule_axi_error_dump(dhdp
, NULL
);
/*
 * dhd_log_dump_axi_error: pretty-prints a raw AXI error record to the
 * kernel log. Only version 1 records (first byte ==
 * HND_EXT_TRAP_AXIERROR_VERSION_1) are understood: the header fields, each
 * DMA FIFO entry, and each FIFO's descriptor entries are printed in turn.
 * Any other version byte is reported as invalid.
 */
9114 dhd_log_dump_axi_error(uint8
*axi_err
)
9116 dma_dentry_v1_t dma_dentry
;
9117 dma_fifo_v1_t dma_fifo
;
/* Dispatch on the record's version byte (first byte of the blob). */
9120 if (*(uint8
*)axi_err
== HND_EXT_TRAP_AXIERROR_VERSION_1
) {
9121 hnd_ext_trap_axi_error_v1_t
*axi_err_v1
= (hnd_ext_trap_axi_error_v1_t
*)axi_err
;
9122 DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__
, axi_err_v1
->signature
));
9123 DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__
, axi_err_v1
->version
));
9124 DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__
, axi_err_v1
->length
));
9125 DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
9126 __FUNCTION__
, axi_err_v1
->dma_fifo_valid_count
));
9127 DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
9128 __FUNCTION__
, axi_err_v1
->axi_errorlog_status
));
9129 DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
9130 __FUNCTION__
, axi_err_v1
->axi_errorlog_core
));
9131 DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
9132 __FUNCTION__
, axi_err_v1
->axi_errorlog_hi
));
9133 DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
9134 __FUNCTION__
, axi_err_v1
->axi_errorlog_lo
));
9135 DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
9136 __FUNCTION__
, axi_err_v1
->axi_errorlog_id
));
/* Walk every DMA FIFO entry captured in the record. */
9138 for (i
= 0; i
< MAX_DMAFIFO_ENTRIES_V1
; i
++) {
9139 dma_fifo
= axi_err_v1
->dma_fifo
[i
];
9140 DHD_ERROR(("%s: valid:%d : 0x%x\n",
9141 __FUNCTION__
, i
, dma_fifo
.valid
));
9142 DHD_ERROR(("%s: direction:%d : 0x%x\n",
9143 __FUNCTION__
, i
, dma_fifo
.direction
));
9144 DHD_ERROR(("%s: index:%d : 0x%x\n",
9145 __FUNCTION__
, i
, dma_fifo
.index
));
9146 DHD_ERROR(("%s: dpa:%d : 0x%x\n",
9147 __FUNCTION__
, i
, dma_fifo
.dpa
));
9148 DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
9149 __FUNCTION__
, i
, dma_fifo
.desc_lo
));
9150 DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
9151 __FUNCTION__
, i
, dma_fifo
.desc_hi
));
9152 DHD_ERROR(("%s: din:%d : 0x%x\n",
9153 __FUNCTION__
, i
, dma_fifo
.din
));
9154 DHD_ERROR(("%s: dout:%d : 0x%x\n",
9155 __FUNCTION__
, i
, dma_fifo
.dout
));
/* Inner loop: descriptor entries of this FIFO. Note the prints use
 * the FIFO index i, not j, in the ":%d" slot — matches the original.
 */
9156 for (j
= 0; j
< MAX_DMAFIFO_DESC_ENTRIES_V1
; j
++) {
9157 dma_dentry
= axi_err_v1
->dma_fifo
[i
].dentry
[j
];
9158 DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
9159 __FUNCTION__
, i
, dma_dentry
.ctrl1
));
9160 DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
9161 __FUNCTION__
, i
, dma_dentry
.ctrl2
));
9162 DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
9163 __FUNCTION__
, i
, dma_dentry
.addrlo
));
9164 DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
9165 __FUNCTION__
, i
, dma_dentry
.addrhi
));
9170 DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__
, (*(uint8
*)axi_err
)));
9173 #endif /* DNGL_AXI_ERROR_LOGGING */
9176 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
9177 * flow queue to their flow ring.
/*
 * Iterates the bus's active flow-ring list under flowring_list_lock and
 * calls dhd_prot_update_txflowring() for each node. Bails out early on bus
 * errors, device removal, or a previously sent HANG; if the walk exceeds
 * max_tx_flowrings the list is considered corrupt and a HANG is raised.
 */
9180 dhd_update_txflowrings(dhd_pub_t
*dhd
)
9182 unsigned long flags
;
9184 flow_ring_node_t
*flow_ring_node
;
9185 struct dhd_bus
*bus
= dhd
->bus
;
/* Don't touch the rings if the bus already reported an error. */
9188 if (dhd_query_bus_erros(dhd
)) {
9192 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
9193 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
9194 for (item
= dll_head_p(&bus
->flowring_active_list
);
9195 (!dhd_is_device_removed(dhd
) && !dll_end(&bus
->flowring_active_list
, item
));
9196 item
= next
, count
++) {
9197 if (dhd
->hang_was_sent
) {
/* More nodes than rings can exist => list corruption; raise HANG. */
9201 if (count
> bus
->max_tx_flowrings
) {
9202 DHD_ERROR(("%s : overflow max flowrings\n", __FUNCTION__
));
9203 dhd
->hang_reason
= HANG_REASON_UNKNOWN
;
9204 dhd_os_send_hang_message(dhd
);
/* Grab the next link before processing; the node may be re-linked. */
9208 next
= dll_next_p(item
);
9209 flow_ring_node
= dhd_constlist_to_flowring(item
);
9211 /* Ensure that flow_ring_node in the list is Not Null */
9212 ASSERT(flow_ring_node
!= NULL
);
9214 /* Ensure that the flowring node has valid contents */
9215 ASSERT(flow_ring_node
->prot_info
!= NULL
);
9217 dhd_prot_update_txflowring(dhd
, flow_ring_node
->flowid
, flow_ring_node
->prot_info
);
9219 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
9222 /** Mailbox ringbell Function */
/*
 * dhd_bus_gen_devmb_intr: raises a host-to-dongle mailbox interrupt.
 * Old PCIe cores (buscorerev 2/4/6) don't support mailbox signalling.
 * When db1_for_mb is set, doorbell 1 (a PCIe core register) is rung via
 * si_corereg; otherwise PCISBMbx in config space is written — twice, per
 * the CRWLPCIEGEN2-182 hardware workaround.
 */
9224 dhd_bus_gen_devmb_intr(struct dhd_bus
*bus
)
9226 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
9227 (bus
->sih
->buscorerev
== 4)) {
9228 DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__
));
9231 if (bus
->db1_for_mb
) {
9232 /* this is a pcie core register, not the config register */
9233 /* XXX: make sure we are on PCIE */
9234 DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__
));
/* DAR cores need an explicit power request before register access. */
9235 if (DAR_PWRREQ(bus
)) {
9236 dhd_bus_pcie_pwr_req(bus
);
9238 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db1_addr_get(bus
),
9241 DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__
));
9242 dhdpcie_bus_cfg_write_dword(bus
, PCISBMbx
, 4, (1 << 0));
9243 /* XXX CRWLPCIEGEN2-182 requires double write */
9244 dhdpcie_bus_cfg_write_dword(bus
, PCISBMbx
, 4, (1 << 0));
9248 /* Upon receiving a mailbox interrupt,
9249 * if H2D_FW_TRAP bit is set in mailbox location
/*
 * dhdpcie_fw_trap: asks the dongle firmware to trap (for forced crash
 * dumps). Preferred path: ring doorbell 1 address 3 with the DB7 magic
 * number when the firmware advertises fw_db7w_trap, updating the DB7
 * debug timestamps/counters. Fallback: send H2D_FW_TRAP via the mailbox,
 * plus a "bus:disconnect" iovar for firmwares that ignore H2D_FW_TRAP.
 */
9253 dhdpcie_fw_trap(dhd_bus_t
*bus
)
9255 DHD_ERROR(("%s: send trap!!!\n", __FUNCTION__
));
9256 if (bus
->dhd
->db7_trap
.fw_db7w_trap
) {
9257 uint32 addr
= dhd_bus_db1_addr_3_get(bus
);
/* Record when the DB7 trap request was issued, for latency stats. */
9258 bus
->dhd
->db7_trap
.debug_db7_send_time
= OSL_LOCALTIME_NS();
9259 bus
->dhd
->db7_trap
.debug_db7_send_cnt
++;
9260 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, addr
, ~0,
9261 bus
->dhd
->db7_trap
.db7_magic_number
);
9265 /* Send the mailbox data and generate mailbox intr. */
9266 dhdpcie_send_mb_data(bus
, H2D_FW_TRAP
);
9267 /* For FWs that cannot interprete H2D_FW_TRAP */
9268 (void)dhd_wl_ioctl_set_intiovar(bus
->dhd
, "bus:disconnect", 99, WLC_SET_VAR
, TRUE
, 0);
/*
 * dhd_bus_inb_ack_pending_ds_req: completes a pending inband deep-sleep
 * request. If the DS state machine is in DS_DEV_SLEEP_PEND and the host
 * has no active users (host_active_cnt == 0), transition to DS_DEV_SLEEP
 * and ACK the dongle with H2D_HOST_DS_ACK.
 */
9274 dhd_bus_inb_ack_pending_ds_req(dhd_bus_t
*bus
)
9276 /* The DHD_BUS_INB_DW_LOCK must be held before
9277 * calling this function !!
9279 if ((dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
9280 DW_DEVICE_DS_DEV_SLEEP_PEND
) &&
9281 (bus
->host_active_cnt
== 0)) {
9282 dhdpcie_bus_set_pcie_inband_dw_state(bus
, DW_DEVICE_DS_DEV_SLEEP
);
9283 dhdpcie_send_mb_data(bus
, H2D_HOST_DS_ACK
);
/*
 * dhd_bus_inb_set_device_wake: drives the inband device-wake state machine.
 * Assert path (visible below): if the dongle is in DS_DEV_SLEEP, send
 * H2DMB_DS_DEVICE_WAKE_ASSERT, move to DS_DISABLED_WAIT, then wait for
 * DS-EXIT; on timeout, bump inband_ds_exit_to_cnt and optionally collect a
 * core dump. Deassert path: if in DS_DEV_WAKE, send
 * H2DMB_DS_DEVICE_WAKE_DEASSERT and move to DS_ACTIVE; a pending sleep
 * request with no active host users is ACKed instead.
 * NOTE(review): 'val' presumably selects assert vs deassert — the branch
 * on it is not visible in this chunk; confirm against the full source.
 * All state transitions occur under DHD_BUS_INB_DW_LOCK.
 */
9288 dhd_bus_inb_set_device_wake(struct dhd_bus
*bus
, bool val
)
9291 unsigned long flags
;
/* No-op unless inband device-wake is enabled on this bus. */
9294 if (!INBAND_DW_ENAB(bus
)) {
9298 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
9301 * Reset the Door Bell Timeout value. So that the Watchdog
9302 * doesn't try to Deassert Device Wake, while we are in
9303 * the process of still Asserting the same.
9305 dhd_bus_doorbell_timeout_reset(bus
);
9307 if (dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
9308 DW_DEVICE_DS_DEV_SLEEP
) {
9309 /* Clear wait_for_ds_exit */
9310 bus
->wait_for_ds_exit
= 0;
/* Optionally time the DS-exit round trip (ts1 = assert time). */
9311 if (bus
->calc_ds_exit_latency
) {
9312 bus
->ds_exit_latency
= 0;
9313 bus
->ds_exit_ts2
= 0;
9314 bus
->ds_exit_ts1
= OSL_SYSUPTIME_US();
9316 ret
= dhdpcie_send_mb_data(bus
, H2DMB_DS_DEVICE_WAKE_ASSERT
);
9317 if (ret
!= BCME_OK
) {
9318 DHD_ERROR(("Failed: assert Inband device_wake\n"));
9319 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9323 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
9324 DW_DEVICE_DS_DISABLED_WAIT
);
9325 bus
->inband_dw_assert_cnt
++;
9326 } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
9327 DW_DEVICE_DS_DISABLED_WAIT
) {
9328 DHD_ERROR(("Inband device wake is already asserted, "
9329 "waiting for DS-Exit\n"));
9332 DHD_INFO(("Not in DS SLEEP state \n"));
9333 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9339 * Since we are going to wait/sleep .. release the lock.
9340 * The Device Wake sanity is still valid, because
9341 * a) If there is another context that comes in and tries
9342 * to assert DS again and if it gets the lock, since
9343 * ds_state would be now != DW_DEVICE_DS_DEV_SLEEP the
9344 * context would return saying Not in DS Sleep.
9345 * b) If ther is another context that comes in and tries
9346 * to de-assert DS and gets the lock,
9347 * since the ds_state is != DW_DEVICE_DS_DEV_WAKE
9348 * that context would return too. This can not happen
9349 * since the watchdog is the only context that can
9350 * De-Assert Device Wake and as the first step of
9351 * Asserting the Device Wake, we have pushed out the
9352 * Door Bell Timeout.
9357 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
9358 DW_DEVICE_DS_DEV_WAKE
);
9359 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9360 /* Called from context that cannot sleep */
9363 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9364 /* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
9365 timeleft
= dhd_os_ds_exit_wait(bus
->dhd
, &bus
->wait_for_ds_exit
);
/* Timed out or woken without DS-EXIT => device failed to wake. */
9366 if (!bus
->wait_for_ds_exit
|| timeleft
== 0) {
9367 DHD_ERROR(("dhd_bus_inb_set_device_wake:DS-EXIT timeout, "
9368 "wait_for_ds_exit : %d\n", bus
->wait_for_ds_exit
));
9369 bus
->inband_ds_exit_to_cnt
++;
9370 bus
->ds_exit_timeout
= 0;
9371 #ifdef DHD_FW_COREDUMP
9372 if (bus
->dhd
->memdump_enabled
) {
9373 /* collect core dump */
9374 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
9375 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(bus
->dhd
);
9376 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
9377 bus
->dhd
->memdump_type
=
9378 DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE
;
9379 dhd_bus_mem_dump(bus
->dhd
);
9383 #endif /* DHD_FW_COREDUMP */
/* Deassert path: re-take the lock and act on the current DS state. */
9390 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
9391 if ((dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
9392 DW_DEVICE_DS_DEV_WAKE
)) {
9393 ret
= dhdpcie_send_mb_data(bus
, H2DMB_DS_DEVICE_WAKE_DEASSERT
);
9394 if (ret
!= BCME_OK
) {
9395 DHD_ERROR(("Failed: deassert Inband device_wake\n"));
9396 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9399 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
9400 DW_DEVICE_DS_ACTIVE
);
9401 bus
->inband_dw_deassert_cnt
++;
9402 } else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
9403 DW_DEVICE_DS_DEV_SLEEP_PEND
) &&
9404 (bus
->host_active_cnt
== 0)) {
/* A sleep request was pending and nobody is active: ACK it now. */
9405 dhdpcie_bus_set_pcie_inband_dw_state(bus
, DW_DEVICE_DS_DEV_SLEEP
);
9406 dhdpcie_send_mb_data(bus
, H2D_HOST_DS_ACK
);
9410 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9416 #endif /* PCIE_INB_DW */
9417 #if defined(PCIE_INB_DW)
/*
 * dhd_bus_doorbell_timeout_reset: restarts the device-wake doorbell idle
 * timer. The timeout (dhd_doorbell_timeout, in watchdog ticks) is scaled
 * by the runtime-PM interval when DHD_PCIE_RUNTIMEPM is built, else by the
 * watchdog interval. With no timeout configured and the bus not suspended,
 * device wake is deasserted immediately instead.
 */
9419 dhd_bus_doorbell_timeout_reset(struct dhd_bus
*bus
)
9421 if (dhd_doorbell_timeout
) {
9422 #ifdef DHD_PCIE_RUNTIMEPM
9423 if (dhd_runtimepm_ms
) {
/* Convert the tick count into runtime-PM polling periods. */
9424 dhd_timeout_start(&bus
->doorbell_timer
,
9425 (dhd_doorbell_timeout
* 1000) / dhd_runtimepm_ms
);
9428 uint wd_scale
= dhd_watchdog_ms
;
9429 if (dhd_watchdog_ms
) {
9430 dhd_timeout_start(&bus
->doorbell_timer
,
9431 (dhd_doorbell_timeout
* 1000) / wd_scale
);
9433 #endif /* DHD_PCIE_RUNTIMEPM */
/* No timer configured: drop device wake now unless bus is suspended. */
9435 else if (!(bus
->dhd
->busstate
== DHD_BUS_SUSPEND
)) {
9436 dhd_bus_set_device_wake(bus
, FALSE
);
/*
 * dhd_bus_set_device_wake: public entry for (de)asserting device wake.
 * Only acts when deep sleep is enabled and the rings are attached;
 * delegates to dhd_bus_inb_set_device_wake() when the inband device-wake
 * protocol is in use.
 */
9441 dhd_bus_set_device_wake(struct dhd_bus
*bus
, bool val
)
9443 if (bus
->ds_enabled
&& bus
->dhd
->ring_attached
) {
9445 if (INBAND_DW_ENAB(bus
)) {
9446 return dhd_bus_inb_set_device_wake(bus
, val
);
9448 #endif /* PCIE_INB_DW */
/*
 * dhd_bus_dw_deassert: watchdog-context housekeeping for device wake.
 * If the doorbell idle timer expired while the bus is in DATA state and
 * idle, deasserts device wake (guarded by the IN_DS_DEASSERT busy flag).
 * Also ticks down the inband DS-EXIT and HOST_SLEEP-EXIT timeout counters,
 * logging and counting a timeout when either reaches its final tick.
 */
9454 dhd_bus_dw_deassert(dhd_pub_t
*dhd
)
9456 dhd_bus_t
*bus
= dhd
->bus
;
9457 unsigned long flags
;
9459 /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */
9460 if (dhd_doorbell_timeout
!= 0 && bus
->dhd
->busstate
== DHD_BUS_DATA
&&
9461 dhd_timeout_expired(&bus
->doorbell_timer
)) {
9462 DHD_GENERAL_LOCK(dhd
, flags
);
/* Only deassert when the bus is fully idle and no cfg op is running. */
9463 if (DHD_BUS_BUSY_CHECK_IDLE(dhd
) &&
9464 !DHD_CHECK_CFG_IN_PROGRESS(dhd
)) {
9465 DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhd
);
/* Drop the lock around the (potentially blocking) deassert call. */
9466 DHD_GENERAL_UNLOCK(dhd
, flags
);
9467 dhd_bus_set_device_wake(bus
, FALSE
);
9468 DHD_GENERAL_LOCK(dhd
, flags
);
9469 DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhd
);
9470 dhd_os_busbusy_wake(bus
->dhd
);
9471 DHD_GENERAL_UNLOCK(dhd
, flags
);
9473 DHD_GENERAL_UNLOCK(dhd
, flags
);
/* Inband mode: age the DS-EXIT / HOST_SLEEP-EXIT watchdog counters. */
9478 if (INBAND_DW_ENAB(bus
)) {
9479 if (bus
->ds_exit_timeout
) {
9480 bus
->ds_exit_timeout
--;
9481 if (bus
->ds_exit_timeout
== 1) {
9482 DHD_ERROR(("DS-EXIT TIMEOUT\n"));
9483 bus
->ds_exit_timeout
= 0;
9484 bus
->inband_ds_exit_to_cnt
++;
9487 if (bus
->host_sleep_exit_timeout
) {
9488 bus
->host_sleep_exit_timeout
--;
9489 if (bus
->host_sleep_exit_timeout
== 1) {
9490 DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
9491 bus
->host_sleep_exit_timeout
= 0;
9492 bus
->inband_host_sleep_exit_to_cnt
++;
9500 /** mailbox doorbell ring function */
/*
 * dhd_bus_ringbell: rings the host-to-dongle doorbell. Skipped when the
 * bus is in low power state or the PCIe link is down. Legacy cores
 * (buscorerev 2/4/6) raise PCIE_INTB via the mailbox-int register; newer
 * cores write doorbell 0 (channel 2 when IDMA is active), issuing a DAR
 * power request first where required.
 */
9502 dhd_bus_ringbell(struct dhd_bus
*bus
, uint32 value
)
9504 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9505 if (__DHD_CHK_BUS_IN_LPS(bus
)) {
9506 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9507 __FUNCTION__
, bus
->bus_low_power_state
));
9511 /* Skip in the case of link down */
9512 if (bus
->is_linkdown
) {
9513 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
9517 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
9518 (bus
->sih
->buscorerev
== 4)) {
9519 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, bus
->pcie_mailbox_int
,
9520 PCIE_INTB
, PCIE_INTB
);
9522 /* this is a pcie core register, not the config regsiter */
9523 /* XXX: makesure we are on PCIE */
9524 DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__
));
9525 if (IDMA_ACTIVE(bus
->dhd
)) {
9526 if (DAR_PWRREQ(bus
)) {
9527 dhd_bus_pcie_pwr_req(bus
);
9529 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db0_addr_2_get(bus
),
9532 if (DAR_PWRREQ(bus
)) {
9533 dhd_bus_pcie_pwr_req(bus
);
/* 0x12345678 is an arbitrary non-zero doorbell payload. */
9535 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
9536 dhd_bus_db0_addr_get(bus
), ~0, 0x12345678);
9541 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
/*
 * dhd_bus_ringbell_2: like dhd_bus_ringbell but always targets doorbell 0
 * channel 2 (used by IDMA/IFRM). Same low-power-state and link-down
 * guards; issues a DAR power request when required before the write.
 */
9543 dhd_bus_ringbell_2(struct dhd_bus
*bus
, uint32 value
, bool devwake
)
9545 /* this is a pcie core register, not the config regsiter */
9546 /* XXX: makesure we are on PCIE */
9547 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9548 if (__DHD_CHK_BUS_IN_LPS(bus
)) {
9549 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9550 __FUNCTION__
, bus
->bus_low_power_state
));
9554 /* Skip in the case of link down */
9555 if (bus
->is_linkdown
) {
9556 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
9560 DHD_INFO(("writing a door bell 2 to the device\n"));
9561 if (DAR_PWRREQ(bus
)) {
9562 dhd_bus_pcie_pwr_req(bus
);
9564 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
, dhd_bus_db0_addr_2_get(bus
),
/*
 * dhdpcie_bus_ringbell_fast: fast-path doorbell using a pre-resolved
 * memory-mapped register (pcie_mb_intr_addr) written with W_REG, avoiding
 * the si_corereg lookup. Handles device-wake assertion / doorbell-timer
 * reset, DAR power request, and the DB0 timestamp feature (db0ts_capable,
 * under DHD_DB0TS) which replaces 'value' with the low 32 bits of a
 * microsecond timestamp.
 */
9569 dhdpcie_bus_ringbell_fast(struct dhd_bus
*bus
, uint32 value
)
9571 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9572 if (__DHD_CHK_BUS_IN_LPS(bus
)) {
9573 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9574 __FUNCTION__
, bus
->bus_low_power_state
));
9578 /* Skip in the case of link down */
9579 if (bus
->is_linkdown
) {
9580 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
9584 #if defined(PCIE_INB_DW)
9585 if (OOB_DW_ENAB(bus
)) {
9586 dhd_bus_set_device_wake(bus
, TRUE
);
9588 dhd_bus_doorbell_timeout_reset(bus
);
9590 if (DAR_PWRREQ(bus
)) {
9591 dhd_bus_pcie_pwr_req(bus
);
9595 if (bus
->dhd
->db0ts_capable
) {
/* Firmware reads a timestamp out of the doorbell payload. */
9601 value
= htol32(ts
& 0xFFFFFFFF);
9602 DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__
, value
));
9604 #endif /* DHD_DB0TS */
9605 W_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_addr
, value
);
/*
 * dhdpcie_bus_ringbell_2_fast: fast-path variant of dhd_bus_ringbell_2 —
 * same guards and device-wake handling, but writes the pre-resolved
 * doorbell-2 register (pcie_mb_intr_2_addr) directly with W_REG.
 */
9609 dhdpcie_bus_ringbell_2_fast(struct dhd_bus
*bus
, uint32 value
, bool devwake
)
9611 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9612 if (__DHD_CHK_BUS_IN_LPS(bus
)) {
9613 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9614 __FUNCTION__
, bus
->bus_low_power_state
));
9618 /* Skip in the case of link down */
9619 if (bus
->is_linkdown
) {
9620 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
9624 #if defined(PCIE_INB_DW)
9626 if (OOB_DW_ENAB(bus
)) {
9627 dhd_bus_set_device_wake(bus
, TRUE
);
9630 dhd_bus_doorbell_timeout_reset(bus
);
9633 if (DAR_PWRREQ(bus
)) {
9634 dhd_bus_pcie_pwr_req(bus
);
9636 W_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_2_addr
, value
);
/*
 * dhd_bus_ringbell_oldpcie: doorbell for legacy PCIe cores — sets the
 * PCIE_INTB bit in the mailbox-int register via a read-modify-write of the
 * mapped register. Same low-power-state and link-down guards as the other
 * ringbell variants.
 */
9640 dhd_bus_ringbell_oldpcie(struct dhd_bus
*bus
, uint32 value
)
9643 /* Skip once bus enters low power state (D3_INFORM/D3_ACK) */
9644 if (__DHD_CHK_BUS_IN_LPS(bus
)) {
9645 DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
9646 __FUNCTION__
, bus
->bus_low_power_state
));
9650 /* Skip in the case of link down */
9651 if (bus
->is_linkdown
) {
9652 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
/* Clear-then-set of PCIE_INTB; the clear keeps other bits intact. */
9656 w
= (R_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_addr
) & ~PCIE_INTB
) | PCIE_INTB
;
9657 W_REG(bus
->pcie_mb_intr_osh
, bus
->pcie_mb_intr_addr
, w
);
/*
 * dhd_bus_get_mbintr_fn: selects the doorbell-ring function for this core.
 * Legacy cores (buscorerev 2/4/6) map the mailbox-int register and use
 * dhd_bus_ringbell_oldpcie; newer cores map doorbell 0 and use the fast
 * W_REG path (dhdpcie_bus_ringbell_fast). Falls back to the generic
 * dhd_bus_ringbell when the register address cannot be resolved.
 */
9661 dhd_bus_get_mbintr_fn(struct dhd_bus
*bus
)
9663 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
9664 (bus
->sih
->buscorerev
== 4)) {
9665 bus
->pcie_mb_intr_addr
= si_corereg_addr(bus
->sih
, bus
->sih
->buscoreidx
,
9666 bus
->pcie_mailbox_int
);
9667 if (bus
->pcie_mb_intr_addr
) {
9668 bus
->pcie_mb_intr_osh
= si_osh(bus
->sih
);
9669 return dhd_bus_ringbell_oldpcie
;
9672 bus
->pcie_mb_intr_addr
= si_corereg_addr(bus
->sih
, bus
->sih
->buscoreidx
,
9673 dhd_bus_db0_addr_get(bus
));
9674 if (bus
->pcie_mb_intr_addr
) {
9675 bus
->pcie_mb_intr_osh
= si_osh(bus
->sih
);
9676 return dhdpcie_bus_ringbell_fast
;
/* Address lookup failed: use the slower si_corereg-based path. */
9679 return dhd_bus_ringbell
;
/*
 * dhd_bus_get_mbintr_2_fn: selects the doorbell-2 (IDMA/IFRM channel 2)
 * ring function — the fast W_REG path if the register address resolves,
 * else the generic dhd_bus_ringbell_2.
 */
9683 dhd_bus_get_mbintr_2_fn(struct dhd_bus
*bus
)
9685 bus
->pcie_mb_intr_2_addr
= si_corereg_addr(bus
->sih
, bus
->sih
->buscoreidx
,
9686 dhd_bus_db0_addr_2_get(bus
));
9687 if (bus
->pcie_mb_intr_2_addr
) {
9688 bus
->pcie_mb_intr_osh
= si_osh(bus
->sih
);
9689 return dhdpcie_bus_ringbell_2_fast
;
9691 return dhd_bus_ringbell_2
;
/*
 * dhd_bus_dpc: deferred procedure call (bottom half) for the PCIe ISR.
 * Guards against bus errors and DHD_BUS_DOWN, marks the bus busy with
 * IN_DPC, processes the pending mailbox interrupt status, then re-enables
 * the host IRQ (disabled by the ISR) and records timing counters.
 * Returns whether a reschedule is wanted (resched), which is also stored
 * in bus->dpc_sched.
 */
9695 BCMFASTPATH(dhd_bus_dpc
)(struct dhd_bus
*bus
)
9697 bool resched
= FALSE
; /* Flag indicating resched wanted */
9698 unsigned long flags
;
9700 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
9702 bus
->dpc_entry_time
= OSL_LOCALTIME_NS();
9704 /* must be the fisrt activity in this function */
9705 if (dhd_query_bus_erros(bus
->dhd
)) {
/* Bus is broken: keep the IRQ off and bail. */
9706 dhdpcie_disable_irq_nosync(bus
);
9710 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
9711 /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
9712 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
9713 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
9714 * and if we return from here, then IOCTL response will never be handled
9716 if (bus
->dhd
->busstate
== DHD_BUS_DOWN
) {
9717 DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__
));
9719 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
9720 bus
->dpc_return_busdown_count
++;
9723 #ifdef DHD_PCIE_RUNTIMEPM
9725 #endif /* DHD_PCIE_RUNTIMEPM */
9726 DHD_BUS_BUSY_SET_IN_DPC(bus
->dhd
);
9727 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
9729 #ifdef DHD_READ_INTSTATUS_IN_DPC
9732 bus
->intstatus
= dhdpcie_bus_intstatus(bus
);
9733 /* Check if the interrupt is ours or not */
9734 if (bus
->intstatus
== 0) {
9739 #endif /* DHD_READ_INTSTATUS_IN_DPC */
/* Core of the DPC: drain mailbox/ring work; returns resched request. */
9741 resched
= dhdpcie_bus_process_mailbox_intr(bus
, bus
->intstatus
);
9744 #ifdef DHD_READ_INTSTATUS_IN_DPC
9746 #endif /* DHD_READ_INTSTATUS_IN_DPC */
9747 bus
->dpc_intr_enable_count
++;
9748 /* For Linux, Macos etc (otherthan NDIS) enable back the host interrupts
9749 * which has been disabled in the dhdpcie_bus_isr()
9751 if (dhdpcie_irq_disabled(bus
)) {
9752 dhdpcie_enable_irq(bus
); /* Enable back interrupt!! */
9753 bus
->dpc_intr_enable_count
++;
9755 bus
->dpc_exit_time
= OSL_LOCALTIME_NS();
9757 bus
->resched_dpc_time
= OSL_LOCALTIME_NS();
9760 bus
->dpc_sched
= resched
;
/* Clear the IN_DPC busy flag and wake anyone waiting on bus-idle. */
9762 DHD_GENERAL_LOCK(bus
->dhd
, flags
);
9763 DHD_BUS_BUSY_CLEAR_IN_DPC(bus
->dhd
);
9764 dhd_os_busbusy_wake(bus
->dhd
);
9765 DHD_GENERAL_UNLOCK(bus
->dhd
, flags
);
/*
 * dhdpcie_send_mb_data: delivers a host-to-dongle mailbox word.
 * With PCIe shared rev >= 6 and mailbox disabled, the word goes as an H2D
 * control message (dhd_prot_h2d_mbdata_send_ctrlmsg). Otherwise the legacy
 * path waits (up to ~100 polls) for the previous H2D_MB_DATA word to be
 * consumed, writes the new word to shared memory, and rings the device
 * mailbox interrupt. D3/D0-inform words additionally bump their
 * bookkeeping counters.
 */
9772 dhdpcie_send_mb_data(dhd_bus_t
*bus
, uint32 h2d_mb_data
)
9774 uint32 cur_h2d_mb_data
= 0;
9776 if (bus
->is_linkdown
) {
9777 DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__
));
9781 DHD_INFO(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__
, h2d_mb_data
));
/* Record the transition in the deep-sleep trace (with or without the
 * inband DW state depending on PCIE_INB_DW).
 */
9784 dhd_bus_ds_trace(bus
, h2d_mb_data
, FALSE
, dhdpcie_bus_get_pcie_inband_dw_state(bus
));
9786 dhd_bus_ds_trace(bus
, h2d_mb_data
, FALSE
);
9787 #endif /* PCIE_INB_DW */
9788 if (bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
&& !bus
->use_mailbox
) {
9789 DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
9791 /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
9792 /* XXX: check the error return value here... */
9793 if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus
->dhd
, h2d_mb_data
)) {
9794 DHD_ERROR(("failure sending the H2D Mailbox message "
/* Legacy path: read back the shared-memory mailbox word. */
9801 dhd_bus_cmn_readshared(bus
, &cur_h2d_mb_data
, H2D_MB_DATA
, 0);
9803 if (cur_h2d_mb_data
!= 0) {
9805 DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__
, cur_h2d_mb_data
));
9806 /* XXX: start a zero length timer to keep checking this to be zero */
9807 while ((i
++ < 100) && cur_h2d_mb_data
) {
9809 dhd_bus_cmn_readshared(bus
, &cur_h2d_mb_data
, H2D_MB_DATA
, 0);
9812 DHD_ERROR(("%s : waited 1ms for the dngl "
9813 "to ack the previous mb transaction\n", __FUNCTION__
));
9814 DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
9815 __FUNCTION__
, cur_h2d_mb_data
));
/* Publish the new word and interrupt the dongle. */
9819 dhd_bus_cmn_writeshared(bus
, &h2d_mb_data
, sizeof(uint32
), H2D_MB_DATA
, 0);
9820 dhd_bus_gen_devmb_intr(bus
);
/* Power-state bookkeeping for the D3/D0 inform messages. */
9823 if (h2d_mb_data
== H2D_HOST_D3_INFORM
) {
9824 DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__
));
9825 bus
->last_d3_inform_time
= OSL_LOCALTIME_NS();
9826 bus
->d3_inform_cnt
++;
9828 if (h2d_mb_data
== H2D_HOST_D0_INFORM_IN_USE
) {
9829 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__
));
9830 bus
->d0_inform_in_use_cnt
++;
9832 if (h2d_mb_data
== H2D_HOST_D0_INFORM
) {
9833 DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__
));
9834 bus
->d0_inform_cnt
++;
/*
 * dhd_bus_handle_d3_ack: handles the dongle's D3 ACK mailbox message
 * during suspend. Immediately disables dongle interrupts (IntMask), clears
 * pending interrupt status, disables the host IRQ, marks the bus as
 * D3-acked, and wakes the suspend path waiting in d3ack. The
 * DHD_INDUCE_D3_ACK_TIMEOUT test hook skips the wake to simulate a D3 ACK
 * timeout.
 */
9842 dhd_bus_handle_d3_ack(dhd_bus_t
*bus
)
9844 bus
->suspend_intr_disable_count
++;
9845 /* Disable dongle Interrupts Immediately after D3 */
9847 /* For Linux, Macos etc (otherthan NDIS) along with disabling
9848 * dongle interrupt by clearing the IntMask, disable directly
9849 * interrupt from the host side as well. Also clear the intstatus
9850 * if it is set to avoid unnecessary intrrupts after D3 ACK.
9852 dhdpcie_bus_intr_disable(bus
); /* Disable interrupt using IntMask!! */
9853 dhdpcie_bus_clear_intstatus(bus
);
9854 dhdpcie_disable_irq_nosync(bus
); /* Disable host interrupt!! */
9856 DHD_SET_BUS_LPS_D3_ACKED(bus
);
9857 DHD_ERROR(("%s: D3_ACK Recieved\n", __FUNCTION__
));
9859 if (bus
->dhd
->dhd_induce_error
== DHD_INDUCE_D3_ACK_TIMEOUT
) {
9860 /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
9861 DHD_ERROR(("%s: Due to d3ack induce error forcefully set "
9862 "bus_low_power_state to DHD_BUS_D3_INFORM_SENT\n", __FUNCTION__
));
9863 DHD_SET_BUS_LPS_D3_INFORMED(bus
);
9865 /* Check for D3 ACK induce flag, which is set by firing dhd iovar to induce D3 Ack timeout.
9866 * If flag is set, D3 wake is skipped, which results in to D3 Ack timeout.
9868 if (bus
->dhd
->dhd_induce_error
!= DHD_INDUCE_D3_ACK_TIMEOUT
) {
/* Normal path: signal the suspend thread that the ACK arrived. */
9869 bus
->wait_for_d3_ack
= 1;
9870 dhd_os_d3ack_wake(bus
->dhd
);
9872 DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__
));
9876 dhd_bus_handle_mb_data(dhd_bus_t
*bus
, uint32 d2h_mb_data
)
9879 unsigned long flags
= 0;
9880 #endif /* PCIE_INB_DW */
9881 if (MULTIBP_ENAB(bus
->sih
)) {
9882 dhd_bus_pcie_pwr_req(bus
);
9885 DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data
));
9887 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
9888 dhd_bus_ds_trace(bus
, d2h_mb_data
, TRUE
, dhdpcie_bus_get_pcie_inband_dw_state(bus
));
9889 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9891 dhd_bus_ds_trace(bus
, d2h_mb_data
, TRUE
);
9892 #endif /* PCIE_INB_DW */
9894 if (d2h_mb_data
& D2H_DEV_FWHALT
) {
9895 if (bus
->dhd
->db7_trap
.fw_db7w_trap_inprogress
== FALSE
) {
9896 DHD_ERROR(("FW trap has happened, dongle_trap_data 0x%8x\n",
9897 bus
->dhd
->dongle_trap_data
));
9900 if (bus
->dhd
->dongle_trap_data
& D2H_DEV_TRAP_HOSTDB
) {
9903 bus
->dhd
->db7_trap
.debug_db7_trap_time
= OSL_LOCALTIME_NS();
9904 bus
->dhd
->db7_trap
.debug_db7_trap_cnt
++;
9905 db7_dur
= bus
->dhd
->db7_trap
.debug_db7_trap_time
-
9906 bus
->dhd
->db7_trap
.debug_db7_send_time
;
9907 if (db7_dur
> bus
->dhd
->db7_trap
.debug_max_db7_dur
) {
9908 bus
->dhd
->db7_trap
.debug_max_db7_send_time
=
9909 bus
->dhd
->db7_trap
.debug_db7_send_time
;
9910 bus
->dhd
->db7_trap
.debug_max_db7_trap_time
=
9911 bus
->dhd
->db7_trap
.debug_db7_trap_time
;
9913 bus
->dhd
->db7_trap
.debug_max_db7_dur
=
9914 MAX(bus
->dhd
->db7_trap
.debug_max_db7_dur
, db7_dur
);
9915 if (bus
->dhd
->db7_trap
.fw_db7w_trap_inprogress
== FALSE
) {
9916 bus
->dhd
->db7_trap
.debug_db7_timing_error_cnt
++;
9919 dhdpcie_checkdied(bus
, NULL
, 0);
9920 #ifdef SUPPORT_LINKDOWN_RECOVERY
9921 #ifdef CONFIG_ARCH_MSM
9922 bus
->no_cfg_restore
= 1;
9923 #endif /* CONFIG_ARCH_MSM */
9924 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9925 dhd_os_check_hang(bus
->dhd
, 0, -EREMOTEIO
);
9927 if (bus
->dhd
->db7_trap
.fw_db7w_trap_inprogress
) {
9928 bus
->dhd
->db7_trap
.fw_db7w_trap_inprogress
= FALSE
;
9929 bus
->dhd
->dongle_trap_occured
= TRUE
;
9933 if (d2h_mb_data
& D2H_DEV_DS_ENTER_REQ
) {
9934 bool ds_acked
= FALSE
;
9935 BCM_REFERENCE(ds_acked
);
9936 if (__DHD_CHK_BUS_LPS_D3_ACKED(bus
)) {
9937 DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
9938 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__
));
9939 bus
->dhd
->busstate
= DHD_BUS_DOWN
;
9942 /* what should we do */
9943 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
9945 if (INBAND_DW_ENAB(bus
)) {
9946 /* As per inband state machine, host should not send DS-ACK
9947 * during suspend or suspend in progress, instead D3 inform will be sent.
9949 if (!bus
->skip_ds_ack
) {
9950 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
9951 if (dhdpcie_bus_get_pcie_inband_dw_state(bus
)
9952 == DW_DEVICE_DS_ACTIVE
) {
9953 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
9954 DW_DEVICE_DS_DEV_SLEEP_PEND
);
9955 if (bus
->host_active_cnt
== 0) {
9956 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
9957 DW_DEVICE_DS_DEV_SLEEP
);
9958 dhdpcie_send_mb_data(bus
, H2D_HOST_DS_ACK
);
9960 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP"
9963 DHD_ERROR(("%s: Failed to send DS-ACK, "
9964 "host_active_cnt is %d\n",
9965 __FUNCTION__
, bus
->host_active_cnt
));
9968 /* Currently DW_DEVICE_HOST_SLEEP_WAIT is set only
9969 * under dhd_bus_suspend() function.
9971 else if (dhdpcie_bus_get_pcie_inband_dw_state(bus
)
9972 == DW_DEVICE_HOST_SLEEP_WAIT
) {
9973 DHD_ERROR(("%s: DS-ACK not sent due to suspend "
9974 "in progress\n", __FUNCTION__
));
9976 DHD_ERROR(("%s: Failed to send DS-ACK, DS state is %d",
9978 dhdpcie_bus_get_pcie_inband_dw_state(bus
)));
9980 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
9981 dhd_os_ds_enter_wake(bus
->dhd
);
9983 DHD_INFO(("%s: Skip DS-ACK due to "
9984 "suspend in progress\n", __FUNCTION__
));
9987 #endif /* PCIE_INB_DW */
9989 dhdpcie_send_mb_data(bus
, H2D_HOST_DS_ACK
);
9990 DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
9993 if (d2h_mb_data
& D2H_DEV_DS_EXIT_NOTE
) {
9995 if (INBAND_DW_ENAB(bus
)) {
9996 if (bus
->calc_ds_exit_latency
) {
9997 bus
->ds_exit_ts2
= OSL_SYSUPTIME_US();
9998 if (bus
->ds_exit_ts2
> bus
->ds_exit_ts1
&&
9999 bus
->ds_exit_ts1
!= 0)
10000 bus
->ds_exit_latency
= bus
->ds_exit_ts2
- bus
->ds_exit_ts1
;
10002 bus
->ds_exit_latency
= 0;
10005 #endif /* PCIE_INB_DW */
10006 /* what should we do */
10007 DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
10009 if (INBAND_DW_ENAB(bus
)) {
10010 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
10011 if (dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
10012 DW_DEVICE_DS_DISABLED_WAIT
) {
10013 /* wake up only if some one is waiting in
10014 * DW_DEVICE_DS_DISABLED_WAIT state
10015 * in this case the waiter will change the state
10016 * to DW_DEVICE_DS_DEV_WAKE
10018 bus
->inband_ds_exit_host_cnt
++;
10019 /* To synchronize with the previous memory operations call wmb() */
10021 bus
->wait_for_ds_exit
= 1;
10022 /* Call another wmb() to make sure before waking up the
10023 * other event value gets updated.
10026 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
10027 DW_DEVICE_DS_DEV_WAKE
);
10028 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
10029 dhd_os_ds_exit_wake(bus
->dhd
);
10030 } else if (dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
10031 DW_DEVICE_DS_DEV_SLEEP
) {
10032 DHD_INFO(("recvd unsolicited DS-EXIT from dongle in DEV_SLEEP\n"));
10034 * unsolicited state change to DW_DEVICE_DS_DEV_WAKE if
10035 * D2H_DEV_DS_EXIT_NOTE received in DW_DEVICE_DS_DEV_SLEEP state.
10036 * This is need when dongle is woken by external events like
10039 bus
->inband_ds_exit_device_cnt
++;
10040 dhdpcie_bus_set_pcie_inband_dw_state(bus
,
10041 DW_DEVICE_DS_DEV_WAKE
);
10042 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
10044 DHD_INFO(("D2H_MB_DATA: not in DS_DISABLED_WAIT/DS_DEV_SLEEP\n"));
10045 bus
->inband_ds_exit_host_cnt
++;
10046 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
10048 dhd_bus_set_device_wake(bus
, FALSE
);
10050 #endif /* PCIE_INB_DW */
10052 if (d2h_mb_data
& D2HMB_DS_HOST_SLEEP_EXIT_ACK
) {
10053 /* what should we do */
10054 DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
10056 if (INBAND_DW_ENAB(bus
)) {
10057 DHD_BUS_INB_DW_LOCK(bus
->inb_lock
, flags
);
10058 if (dhdpcie_bus_get_pcie_inband_dw_state(bus
) ==
10059 DW_DEVICE_HOST_WAKE_WAIT
) {
10060 dhdpcie_bus_set_pcie_inband_dw_state(bus
, DW_DEVICE_DS_ACTIVE
);
10062 DHD_BUS_INB_DW_UNLOCK(bus
->inb_lock
, flags
);
10064 #endif /* PCIE_INB_DW */
10066 if (d2h_mb_data
& D2H_DEV_D3_ACK
) {
10067 /* what should we do */
10068 DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
10069 if (!bus
->wait_for_d3_ack
) {
10070 #if defined(DHD_HANG_SEND_UP_TEST)
10071 if (bus
->dhd
->req_hang_type
== HANG_REASON_D3_ACK_TIMEOUT
) {
10072 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
10074 dhd_bus_handle_d3_ack(bus
);
10076 #else /* DHD_HANG_SEND_UP_TEST */
10077 dhd_bus_handle_d3_ack(bus
);
10078 #endif /* DHD_HANG_SEND_UP_TEST */
10083 if (MULTIBP_ENAB(bus
->sih
)) {
10084 dhd_bus_pcie_pwr_req_clear(bus
);
10089 dhdpcie_handle_mb_data(dhd_bus_t
*bus
)
10091 uint32 d2h_mb_data
= 0;
10094 if (MULTIBP_ENAB(bus
->sih
)) {
10095 dhd_bus_pcie_pwr_req(bus
);
10098 if (bus
->is_linkdown
) {
10099 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
10103 dhd_bus_cmn_readshared(bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
10104 if (D2H_DEV_MB_INVALIDATED(d2h_mb_data
)) {
10105 DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
10106 __FUNCTION__
, d2h_mb_data
));
10110 dhd_bus_cmn_writeshared(bus
, &zero
, sizeof(uint32
), D2H_MB_DATA
, 0);
10112 DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__
, d2h_mb_data
));
10113 if (d2h_mb_data
& D2H_DEV_FWHALT
) {
10114 DHD_ERROR(("FW trap has happened\n"));
10115 dhdpcie_checkdied(bus
, NULL
, 0);
10116 /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
10119 if (d2h_mb_data
& D2H_DEV_DS_ENTER_REQ
) {
10120 /* what should we do */
10121 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__
));
10122 dhdpcie_send_mb_data(bus
, H2D_HOST_DS_ACK
);
10123 DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__
));
10125 if (d2h_mb_data
& D2H_DEV_DS_EXIT_NOTE
) {
10126 /* what should we do */
10127 DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__
));
10129 if (d2h_mb_data
& D2H_DEV_D3_ACK
) {
10130 /* what should we do */
10131 DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__
));
10132 if (!bus
->wait_for_d3_ack
) {
10133 #if defined(DHD_HANG_SEND_UP_TEST)
10134 if (bus
->dhd
->req_hang_type
== HANG_REASON_D3_ACK_TIMEOUT
) {
10135 DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
10137 dhd_bus_handle_d3_ack(bus
);
10139 #else /* DHD_HANG_SEND_UP_TEST */
10140 dhd_bus_handle_d3_ack(bus
);
10141 #endif /* DHD_HANG_SEND_UP_TEST */
10146 if (MULTIBP_ENAB(bus
->sih
)) {
10147 dhd_bus_pcie_pwr_req_clear(bus
);
10152 dhdpcie_read_handle_mb_data(dhd_bus_t
*bus
)
10154 uint32 d2h_mb_data
= 0;
10157 if (MULTIBP_ENAB(bus
->sih
)) {
10158 dhd_bus_pcie_pwr_req(bus
);
10161 dhd_bus_cmn_readshared(bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
10162 if (!d2h_mb_data
) {
10166 dhd_bus_cmn_writeshared(bus
, &zero
, sizeof(uint32
), D2H_MB_DATA
, 0);
10168 dhd_bus_handle_mb_data(bus
, d2h_mb_data
);
10171 if (MULTIBP_ENAB(bus
->sih
)) {
10172 dhd_bus_pcie_pwr_req_clear(bus
);
10177 dhdpcie_bus_process_mailbox_intr(dhd_bus_t
*bus
, uint32 intstatus
)
10179 bool resched
= FALSE
;
10181 if (MULTIBP_ENAB(bus
->sih
)) {
10182 dhd_bus_pcie_pwr_req(bus
);
10184 if ((bus
->sih
->buscorerev
== 2) || (bus
->sih
->buscorerev
== 6) ||
10185 (bus
->sih
->buscorerev
== 4)) {
10186 /* Msg stream interrupt */
10187 if (intstatus
& I_BIT1
) {
10188 resched
= dhdpci_bus_read_frames(bus
);
10189 } else if (intstatus
& I_BIT0
) {
10190 /* do nothing for Now */
10193 if (intstatus
& (PCIE_MB_TOPCIE_FN0_0
| PCIE_MB_TOPCIE_FN0_1
))
10194 bus
->api
.handle_mb_data(bus
);
10196 /* Do no process any rings after recieving D3_ACK */
10197 if (DHD_CHK_BUS_LPS_D3_ACKED(bus
)) {
10198 DHD_ERROR(("%s: D3 Ack Recieved. "
10199 "Skip processing rest of ring buffers.\n", __FUNCTION__
));
10203 /* The fact that we are here implies that dhdpcie_bus_intstatus( )
10204 * retuned a non-zer0 status after applying the current mask.
10205 * No further check required, in fact bus->instatus can be eliminated.
10206 * Both bus->instatus, and bud->intdis are shared between isr and dpc.
10208 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
10209 if (pm_runtime_get(dhd_bus_to_dev(bus
)) >= 0) {
10210 resched
= dhdpci_bus_read_frames(bus
);
10211 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus
));
10212 pm_runtime_put_autosuspend(dhd_bus_to_dev(bus
));
10215 resched
= dhdpci_bus_read_frames(bus
);
10216 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
10220 if (MULTIBP_ENAB(bus
->sih
)) {
10221 dhd_bus_pcie_pwr_req_clear(bus
);
10226 #if defined(DHD_H2D_LOG_TIME_SYNC)
10228 dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t
*bus
)
10230 unsigned long time_elapsed
;
10232 /* Poll for timeout value periodically */
10233 if ((bus
->dhd
->busstate
== DHD_BUS_DATA
) &&
10234 (bus
->dhd
->dhd_rte_time_sync_ms
!= 0) &&
10235 DHD_CHK_BUS_NOT_IN_LPS(bus
)) {
10237 * XXX OSL_SYSUPTIME_US() overflow should not happen.
10238 * As it is a unsigned 64 bit value 18446744073709551616L,
10239 * which needs 213503982334 days to overflow
10241 time_elapsed
= OSL_SYSUPTIME_US() - bus
->dhd_rte_time_sync_count
;
10242 /* Compare time is milli seconds */
10243 if ((time_elapsed
/ 1000) >= bus
->dhd
->dhd_rte_time_sync_ms
) {
10245 * Its fine, if it has crossed the timeout value. No need to adjust the
10248 bus
->dhd_rte_time_sync_count
+= time_elapsed
;
10250 /* Schedule deffered work. Work function will send IOVAR. */
10251 dhd_h2d_log_time_sync_deferred_wq_schedule(bus
->dhd
);
10255 #endif /* DHD_H2D_LOG_TIME_SYNC */
10258 dhdpci_bus_read_frames(dhd_bus_t
*bus
)
10262 /* First check if there a FW trap */
10263 if ((bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
) &&
10264 (bus
->dhd
->dongle_trap_data
= dhd_prot_process_trapbuf(bus
->dhd
))) {
10265 #ifdef DNGL_AXI_ERROR_LOGGING
10266 if (bus
->dhd
->axi_error
) {
10267 DHD_ERROR(("AXI Error happened\n"));
10270 #endif /* DNGL_AXI_ERROR_LOGGING */
10271 dhd_bus_handle_mb_data(bus
, D2H_DEV_FWHALT
);
10275 /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
10276 dhd_prot_process_ctrlbuf(bus
->dhd
);
10277 bus
->last_process_ctrlbuf_time
= OSL_LOCALTIME_NS();
10279 /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
10280 if (DHD_CHK_BUS_IN_LPS(bus
)) {
10281 DHD_ERROR(("%s: Bus is in power save state (%d). "
10282 "Skip processing rest of ring buffers.\n",
10283 __FUNCTION__
, bus
->bus_low_power_state
));
10287 /* update the flow ring cpls */
10288 dhd_update_txflowrings(bus
->dhd
);
10289 bus
->last_process_flowring_time
= OSL_LOCALTIME_NS();
10291 /* With heavy TX traffic, we could get a lot of TxStatus
10294 more
|= dhd_prot_process_msgbuf_txcpl(bus
->dhd
, dhd_txbound
, DHD_REGULAR_RING
);
10295 bus
->last_process_txcpl_time
= OSL_LOCALTIME_NS();
10297 /* With heavy RX traffic, this routine potentially could spend some time
10298 * processing RX frames without RX bound
10300 more
|= dhd_prot_process_msgbuf_rxcpl(bus
->dhd
, dhd_rxbound
, DHD_REGULAR_RING
);
10301 bus
->last_process_rxcpl_time
= OSL_LOCALTIME_NS();
10303 /* Process info ring completion messages */
10305 if (!bus
->dhd
->dongle_edl_support
)
10308 more
|= dhd_prot_process_msgbuf_infocpl(bus
->dhd
, DHD_INFORING_BOUND
);
10309 bus
->last_process_infocpl_time
= OSL_LOCALTIME_NS();
10313 more
|= dhd_prot_process_msgbuf_edl(bus
->dhd
);
10314 bus
->last_process_edl_time
= OSL_LOCALTIME_NS();
10316 #endif /* EWP_EDL */
10318 #ifdef IDLE_TX_FLOW_MGMT
10319 if (bus
->enable_idle_flowring_mgmt
) {
10320 /* Look for idle flow rings */
10321 dhd_bus_check_idle_scan(bus
);
10323 #endif /* IDLE_TX_FLOW_MGMT */
10325 /* don't talk to the dongle if fw is about to be reloaded */
10326 if (bus
->dhd
->hang_was_sent
) {
10330 #ifdef SUPPORT_LINKDOWN_RECOVERY
10331 /* XXX : It seems that linkdown is occurred without notification,
10332 * In case read shared memory failed, recovery hang is needed
10334 if (bus
->read_shm_fail
) {
10335 /* Read interrupt state once again to confirm linkdown */
10336 int intstatus
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
10337 bus
->pcie_mailbox_int
, 0, 0);
10338 if (intstatus
!= (uint32
)-1) {
10339 DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__
));
10340 #ifdef DHD_FW_COREDUMP
10341 if (bus
->dhd
->memdump_enabled
) {
10342 DHD_OS_WAKE_LOCK(bus
->dhd
);
10343 bus
->dhd
->memdump_type
= DUMP_TYPE_READ_SHM_FAIL
;
10344 dhd_bus_mem_dump(bus
->dhd
);
10345 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
10347 #endif /* DHD_FW_COREDUMP */
10349 DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__
));
10350 #ifdef CONFIG_ARCH_MSM
10351 bus
->no_cfg_restore
= 1;
10352 #endif /* CONFIG_ARCH_MSM */
10353 bus
->is_linkdown
= 1;
10356 /* XXX The dhd_prot_debug_info_print() function *has* to be
10357 * invoked only if the bus->is_linkdown is updated so that
10358 * host doesn't need to read any pcie registers if
10359 * PCIe link is down.
10361 dhd_prot_debug_info_print(bus
->dhd
);
10362 bus
->dhd
->hang_reason
= HANG_REASON_PCIE_LINK_DOWN_EP_DETECT
;
10363 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
10364 copy_hang_info_linkdown(bus
->dhd
);
10365 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
10366 dhd_os_send_hang_message(bus
->dhd
);
10369 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10370 #if defined(DHD_H2D_LOG_TIME_SYNC)
10371 dhdpci_bus_rte_log_time_sync_poll(bus
);
10372 #endif /* DHD_H2D_LOG_TIME_SYNC */
10377 dhdpcie_tcm_valid(dhd_bus_t
*bus
)
10382 pciedev_shared_t sh
;
10384 shaddr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
10386 /* Read last word in memory to determine address of pciedev_shared structure */
10387 addr
= LTOH32(dhdpcie_bus_rtcm32(bus
, shaddr
));
10389 if ((addr
== 0) || (addr
== bus
->nvram_csm
) || (addr
< bus
->dongle_ram_base
) ||
10391 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
10392 __FUNCTION__
, addr
));
10396 /* Read hndrte_shared structure */
10397 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)&sh
,
10398 sizeof(pciedev_shared_t
))) < 0) {
10399 DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv
));
10403 /* Compare any field in pciedev_shared_t */
10404 if (sh
.console_addr
!= bus
->pcie_sh
->console_addr
) {
10405 DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
10413 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version
, uint32 host_api_version
)
10415 snprintf(bus_api_revision
, BUS_API_REV_STR_LEN
, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
10416 firmware_api_version
, host_api_version
);
10421 dhdpcie_check_firmware_compatible(uint32 firmware_api_version
, uint32 host_api_version
)
10423 bool retcode
= FALSE
;
10425 DHD_INFO(("firmware api revision %d, host api revision %d\n",
10426 firmware_api_version
, host_api_version
));
10428 switch (firmware_api_version
) {
10429 case PCIE_SHARED_VERSION_7
:
10430 case PCIE_SHARED_VERSION_6
:
10431 case PCIE_SHARED_VERSION_5
:
10435 if (firmware_api_version
<= host_api_version
)
10442 dhdpcie_readshared(dhd_bus_t
*bus
)
10445 int rv
, dma_indx_wr_buf
, dma_indx_rd_buf
;
10447 pciedev_shared_t
*sh
= bus
->pcie_sh
;
10449 bool idma_en
= FALSE
;
10450 #if defined(PCIE_INB_DW)
10451 bool d2h_inband_dw
= FALSE
;
10452 #endif /* defined(PCIE_INB_DW) */
10453 uint32 timeout
= MAX_READ_TIMEOUT
;
10456 if (MULTIBP_ENAB(bus
->sih
)) {
10457 dhd_bus_pcie_pwr_req(bus
);
10460 shaddr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
10462 /* start a timer for 5 seconds */
10463 dhd_timeout_start(&tmo
, timeout
);
10465 while (((addr
== 0) || (addr
== bus
->nvram_csm
)) && !dhd_timeout_expired(&tmo
)) {
10466 /* Read last word in memory to determine address of pciedev_shared structure */
10467 addr
= LTOH32(dhdpcie_bus_rtcm32(bus
, shaddr
));
10470 if (addr
== (uint32
)-1) {
10471 DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__
));
10472 #ifdef SUPPORT_LINKDOWN_RECOVERY
10473 #ifdef CONFIG_ARCH_MSM
10474 bus
->no_cfg_restore
= 1;
10475 #endif /* CONFIG_ARCH_MSM */
10476 #endif /* SUPPORT_LINKDOWN_RECOVERY */
10477 bus
->is_linkdown
= 1;
10481 if ((addr
== 0) || (addr
== bus
->nvram_csm
) || (addr
< bus
->dongle_ram_base
) ||
10483 elapsed
= tmo
.elapsed
;
10484 DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
10485 __FUNCTION__
, addr
));
10486 DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__
, tmo
.elapsed
));
10487 #ifdef DEBUG_DNGL_INIT_FAIL
10488 if (addr
!= (uint32
)-1) { /* skip further PCIE reads if read this addr */
10489 #ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
10490 bus
->dhd
->memdump_enabled
= DUMP_MEMFILE
;
10492 /* Force panic as HAL will not be inited yet */
10493 bus
->dhd
->memdump_enabled
= DUMP_MEMONLY
;
10494 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
10495 if (bus
->dhd
->memdump_enabled
) {
10496 bus
->dhd
->memdump_type
= DUMP_TYPE_DONGLE_INIT_FAILURE
;
10497 dhdpcie_mem_dump(bus
);
10500 #endif /* DEBUG_DNGL_INIT_FAIL */
10503 bus
->rd_shared_pass_time
= OSL_LOCALTIME_NS();
10504 elapsed
= tmo
.elapsed
;
10505 bus
->shared_addr
= (ulong
)addr
;
10506 DHD_ERROR(("### Total time ARM OOR to Readshared pass took %llu usec ###\n",
10507 DIV_U64_BY_U32((bus
->rd_shared_pass_time
- bus
->arm_oor_time
),
10509 DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
10510 "before dongle is ready\n", __FUNCTION__
, addr
, elapsed
));
10513 /* Read hndrte_shared structure */
10514 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, addr
, (uint8
*)sh
,
10515 sizeof(pciedev_shared_t
))) < 0) {
10516 DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__
, rv
));
10521 sh
->flags
= ltoh32(sh
->flags
);
10522 sh
->trap_addr
= ltoh32(sh
->trap_addr
);
10523 sh
->assert_exp_addr
= ltoh32(sh
->assert_exp_addr
);
10524 sh
->assert_file_addr
= ltoh32(sh
->assert_file_addr
);
10525 sh
->assert_line
= ltoh32(sh
->assert_line
);
10526 sh
->console_addr
= ltoh32(sh
->console_addr
);
10527 sh
->msgtrace_addr
= ltoh32(sh
->msgtrace_addr
);
10528 sh
->dma_rxoffset
= ltoh32(sh
->dma_rxoffset
);
10529 sh
->rings_info_ptr
= ltoh32(sh
->rings_info_ptr
);
10530 sh
->flags2
= ltoh32(sh
->flags2
);
10532 /* load bus console address */
10533 bus
->console_addr
= sh
->console_addr
;
10535 /* Read the dma rx offset */
10536 bus
->dma_rxoffset
= bus
->pcie_sh
->dma_rxoffset
;
10537 dhd_prot_rx_dataoffset(bus
->dhd
, bus
->dma_rxoffset
);
10539 DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__
, bus
->dma_rxoffset
));
10541 bus
->api
.fw_rev
= sh
->flags
& PCIE_SHARED_VERSION_MASK
;
10542 if (!(dhdpcie_check_firmware_compatible(bus
->api
.fw_rev
, PCIE_SHARED_VERSION
)))
10544 DHD_ERROR(("%s: pcie_shared version %d in dhd "
10545 "is older than pciedev_shared version %d in dongle\n",
10546 __FUNCTION__
, PCIE_SHARED_VERSION
,
10550 dhdpcie_update_bus_api_revisions(bus
->api
.fw_rev
, PCIE_SHARED_VERSION
);
10552 bus
->rw_index_sz
= (sh
->flags
& PCIE_SHARED_2BYTE_INDICES
) ?
10553 sizeof(uint16
) : sizeof(uint32
);
10554 DHD_INFO(("%s: Dongle advertizes %d size indices\n",
10555 __FUNCTION__
, bus
->rw_index_sz
));
10557 #ifdef IDLE_TX_FLOW_MGMT
10558 if (sh
->flags
& PCIE_SHARED_IDLE_FLOW_RING
) {
10559 DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
10561 bus
->enable_idle_flowring_mgmt
= TRUE
;
10563 #endif /* IDLE_TX_FLOW_MGMT */
10566 bus
->dhd
->d2h_inband_dw
= (sh
->flags
& PCIE_SHARED_INBAND_DS
) ? TRUE
: FALSE
;
10567 d2h_inband_dw
= bus
->dhd
->d2h_inband_dw
;
10568 #endif /* PCIE_INB_DW */
10570 #if defined(PCIE_INB_DW)
10571 DHD_ERROR(("FW supports Inband dw ? %s\n",
10572 d2h_inband_dw
? "Y":"N"));
10573 #endif /* defined(PCIE_INB_DW) */
10575 if (IDMA_CAPABLE(bus
)) {
10576 if (bus
->sih
->buscorerev
== 23) {
10578 if (bus
->dhd
->d2h_inband_dw
)
10582 #endif /* PCIE_INB_DW */
10588 /* Read flag2 HWA bit */
10589 bus
->dhd
->hwa_capable
= (sh
->flags2
& PCIE_SHARED2_HWA
) ? TRUE
: FALSE
;
10590 DHD_ERROR(("FW supports HWA ? %s\n", bus
->dhd
->hwa_capable
? "Y":"N"));
10591 bus
->hwa_db_index_sz
= PCIE_HWA_DB_INDEX_SZ
;
10594 bus
->dhd
->idma_enable
= (sh
->flags
& PCIE_SHARED_IDMA
) ? TRUE
: FALSE
;
10595 bus
->dhd
->ifrm_enable
= (sh
->flags
& PCIE_SHARED_IFRM
) ? TRUE
: FALSE
;
10598 bus
->dhd
->d2h_sync_mode
= sh
->flags
& PCIE_SHARED_D2H_SYNC_MODE_MASK
;
10600 bus
->dhd
->dar_enable
= (sh
->flags
& PCIE_SHARED_DAR
) ? TRUE
: FALSE
;
10602 /* Does the FW support DMA'ing r/w indices */
10603 if (sh
->flags
& PCIE_SHARED_DMA_INDEX
) {
10604 if (!bus
->dhd
->dma_ring_upd_overwrite
) {
10606 if (!IFRM_ENAB(bus
->dhd
)) {
10607 bus
->dhd
->dma_h2d_ring_upd_support
= TRUE
;
10609 bus
->dhd
->dma_d2h_ring_upd_support
= TRUE
;
10613 if (bus
->dhd
->dma_d2h_ring_upd_support
&& bus
->dhd
->d2h_sync_mode
) {
10614 DHD_ERROR(("%s: ERROR COMBO: sync (0x%x) enabled for DMA indices\n",
10615 __FUNCTION__
, bus
->dhd
->d2h_sync_mode
));
10618 DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
10620 (bus
->dhd
->dma_h2d_ring_upd_support
? 1 : 0),
10621 (bus
->dhd
->dma_d2h_ring_upd_support
? 1 : 0)));
10622 } else if (!(sh
->flags
& PCIE_SHARED_D2H_SYNC_MODE_MASK
)) {
10623 DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
10625 return BCME_UNSUPPORTED
;
10627 bus
->dhd
->dma_h2d_ring_upd_support
= FALSE
;
10628 bus
->dhd
->dma_d2h_ring_upd_support
= FALSE
;
10631 /* Does the firmware support fast delete ring? */
10632 if (sh
->flags2
& PCIE_SHARED2_FAST_DELETE_RING
) {
10633 DHD_INFO(("%s: Firmware supports fast delete ring\n",
10635 bus
->dhd
->fast_delete_ring_support
= TRUE
;
10637 DHD_INFO(("%s: Firmware does not support fast delete ring\n",
10639 bus
->dhd
->fast_delete_ring_support
= FALSE
;
10642 /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
10644 ring_info_t ring_info
;
10646 /* boundary check */
10647 if ((sh
->rings_info_ptr
< bus
->dongle_ram_base
) || (sh
->rings_info_ptr
> shaddr
)) {
10648 DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
10649 __FUNCTION__
, sh
->rings_info_ptr
));
10653 if ((rv
= dhdpcie_bus_membytes(bus
, FALSE
, sh
->rings_info_ptr
,
10654 (uint8
*)&ring_info
, sizeof(ring_info_t
))) < 0)
10657 bus
->h2d_mb_data_ptr_addr
= ltoh32(sh
->h2d_mb_data_ptr
);
10658 bus
->d2h_mb_data_ptr_addr
= ltoh32(sh
->d2h_mb_data_ptr
);
10660 if (bus
->api
.fw_rev
>= PCIE_SHARED_VERSION_6
) {
10661 bus
->max_tx_flowrings
= ltoh16(ring_info
.max_tx_flowrings
);
10662 bus
->max_submission_rings
= ltoh16(ring_info
.max_submission_queues
);
10663 bus
->max_completion_rings
= ltoh16(ring_info
.max_completion_rings
);
10664 bus
->max_cmn_rings
= bus
->max_submission_rings
- bus
->max_tx_flowrings
;
10665 bus
->api
.handle_mb_data
= dhdpcie_read_handle_mb_data
;
10666 bus
->use_mailbox
= sh
->flags
& PCIE_SHARED_USE_MAILBOX
;
10669 bus
->max_tx_flowrings
= ltoh16(ring_info
.max_tx_flowrings
);
10670 bus
->max_submission_rings
= bus
->max_tx_flowrings
;
10671 bus
->max_completion_rings
= BCMPCIE_D2H_COMMON_MSGRINGS
;
10672 bus
->max_cmn_rings
= BCMPCIE_H2D_COMMON_MSGRINGS
;
10673 bus
->api
.handle_mb_data
= dhdpcie_handle_mb_data
;
10674 bus
->use_mailbox
= TRUE
;
10676 if (bus
->max_completion_rings
== 0) {
10677 DHD_ERROR(("dongle completion rings are invalid %d\n",
10678 bus
->max_completion_rings
));
10681 if (bus
->max_submission_rings
== 0) {
10682 DHD_ERROR(("dongle submission rings are invalid %d\n",
10683 bus
->max_submission_rings
));
10686 if (bus
->max_tx_flowrings
== 0) {
10687 DHD_ERROR(("dongle txflow rings are invalid %d\n", bus
->max_tx_flowrings
));
10691 /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
10692 * The max_sub_queues is read from FW initialized ring_info
10694 if (bus
->dhd
->dma_h2d_ring_upd_support
|| IDMA_ENAB(bus
->dhd
)) {
10695 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
10696 H2D_DMA_INDX_WR_BUF
, bus
->max_submission_rings
);
10697 dma_indx_rd_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
10698 D2H_DMA_INDX_RD_BUF
, bus
->max_completion_rings
);
10700 if ((dma_indx_wr_buf
!= BCME_OK
) || (dma_indx_rd_buf
!= BCME_OK
)) {
10701 DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
10702 "Host will use w/r indices in TCM\n",
10704 bus
->dhd
->dma_h2d_ring_upd_support
= FALSE
;
10705 bus
->dhd
->idma_enable
= FALSE
;
10709 if (bus
->dhd
->dma_d2h_ring_upd_support
) {
10710 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
10711 D2H_DMA_INDX_WR_BUF
, bus
->max_completion_rings
);
10712 dma_indx_rd_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
10713 H2D_DMA_INDX_RD_BUF
, bus
->max_submission_rings
);
10715 if ((dma_indx_wr_buf
!= BCME_OK
) || (dma_indx_rd_buf
!= BCME_OK
)) {
10716 DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
10717 "Host will use w/r indices in TCM\n",
10719 bus
->dhd
->dma_d2h_ring_upd_support
= FALSE
;
10723 if (IFRM_ENAB(bus
->dhd
)) {
10724 dma_indx_wr_buf
= dhd_prot_dma_indx_init(bus
->dhd
, bus
->rw_index_sz
,
10725 H2D_IFRM_INDX_WR_BUF
, bus
->max_tx_flowrings
);
10727 if (dma_indx_wr_buf
!= BCME_OK
) {
10728 DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
10730 bus
->dhd
->ifrm_enable
= FALSE
;
10734 /* read ringmem and ringstate ptrs from shared area and store in host variables */
10735 dhd_fillup_ring_sharedptr_info(bus
, &ring_info
);
10736 if (dhd_msg_level
& DHD_INFO_VAL
) {
10737 bcm_print_bytes("ring_info_raw", (uchar
*)&ring_info
, sizeof(ring_info_t
));
10739 DHD_INFO(("%s: ring_info\n", __FUNCTION__
));
10741 DHD_ERROR(("%s: max H2D queues %d\n",
10742 __FUNCTION__
, ltoh16(ring_info
.max_tx_flowrings
)));
10744 DHD_INFO(("mail box address\n"));
10745 DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
10746 __FUNCTION__
, bus
->h2d_mb_data_ptr_addr
));
10747 DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
10748 __FUNCTION__
, bus
->d2h_mb_data_ptr_addr
));
10751 DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
10752 __FUNCTION__
, bus
->dhd
->d2h_sync_mode
));
10754 bus
->dhd
->d2h_hostrdy_supported
=
10755 ((sh
->flags
& PCIE_SHARED_HOSTRDY_SUPPORT
) == PCIE_SHARED_HOSTRDY_SUPPORT
);
10757 bus
->dhd
->ext_trap_data_supported
=
10758 ((sh
->flags2
& PCIE_SHARED2_EXTENDED_TRAP_DATA
) == PCIE_SHARED2_EXTENDED_TRAP_DATA
);
10760 if ((sh
->flags2
& PCIE_SHARED2_TXSTATUS_METADATA
) == 0)
10761 bus
->dhd
->pcie_txs_metadata_enable
= 0;
10763 if (sh
->flags2
& PCIE_SHARED2_TRAP_ON_HOST_DB7
) {
10764 memset(&bus
->dhd
->db7_trap
, 0, sizeof(bus
->dhd
->db7_trap
));
10765 bus
->dhd
->db7_trap
.fw_db7w_trap
= 1;
10766 /* add an option to let the user select ?? */
10767 bus
->dhd
->db7_trap
.db7_magic_number
= PCIE_DB7_MAGIC_NUMBER_DPC_TRAP
;
10770 bus
->dhd
->hscb_enable
=
10771 (sh
->flags2
& PCIE_SHARED2_HSCB
) == PCIE_SHARED2_HSCB
;
10774 if (host_edl_support
) {
10775 bus
->dhd
->dongle_edl_support
= (sh
->flags2
& PCIE_SHARED2_EDL_RING
) ? TRUE
: FALSE
;
10776 DHD_ERROR(("Dongle EDL support: %u\n", bus
->dhd
->dongle_edl_support
));
10778 #endif /* EWP_EDL */
10780 bus
->dhd
->debug_buf_dest_support
=
10781 (sh
->flags2
& PCIE_SHARED2_DEBUG_BUF_DEST
) ? TRUE
: FALSE
;
10782 DHD_ERROR(("FW supports debug buf dest ? %s \n",
10783 bus
->dhd
->debug_buf_dest_support
? "Y" : "N"));
10786 bus
->dhd
->db0ts_capable
=
10787 (sh
->flags
& PCIE_SHARED_TIMESTAMP_DB0
) == PCIE_SHARED_TIMESTAMP_DB0
;
10788 #endif /* DHD_DB0TS */
10790 if (MULTIBP_ENAB(bus
->sih
)) {
10791 dhd_bus_pcie_pwr_req_clear(bus
);
10794 * WAR to fix ARM cold boot;
10795 * De-assert WL domain in DAR
10797 if (bus
->sih
->buscorerev
>= 68) {
10798 dhd_bus_pcie_pwr_req_wl_domain(bus
,
10799 DAR_PCIE_PWR_CTRL((bus
->sih
)->buscorerev
), FALSE
);
10803 } /* dhdpcie_readshared */
10805 /** Read ring mem and ring state ptr info from shared memory area in device memory */
10807 dhd_fillup_ring_sharedptr_info(dhd_bus_t
*bus
, ring_info_t
*ring_info
)
10812 uint32 d2h_w_idx_ptr
, d2h_r_idx_ptr
, h2d_w_idx_ptr
, h2d_r_idx_ptr
;
10813 uint32 h2d_hwa_db_idx_ptr
= 0, d2h_hwa_db_idx_ptr
= 0;
10814 uint16 max_tx_flowrings
= bus
->max_tx_flowrings
;
10816 /* Ring mem ptr info */
10817 /* Alloated in the order
10818 H2D_MSGRING_CONTROL_SUBMIT 0
10819 H2D_MSGRING_RXPOST_SUBMIT 1
10820 D2H_MSGRING_CONTROL_COMPLETE 2
10821 D2H_MSGRING_TX_COMPLETE 3
10822 D2H_MSGRING_RX_COMPLETE 4
10826 /* ringmemptr holds start of the mem block address space */
10827 tcm_memloc
= ltoh32(ring_info
->ringmem_ptr
);
10829 /* Find out ringmem ptr for each ring common ring */
10830 for (i
= 0; i
<= BCMPCIE_COMMON_MSGRING_MAX_ID
; i
++) {
10831 bus
->ring_sh
[i
].ring_mem_addr
= tcm_memloc
;
10832 /* Update mem block */
10833 tcm_memloc
= tcm_memloc
+ sizeof(ring_mem_t
);
10834 DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__
,
10835 i
, bus
->ring_sh
[i
].ring_mem_addr
));
10839 /* Ring state mem ptr info */
10841 d2h_w_idx_ptr
= ltoh32(ring_info
->d2h_w_idx_ptr
);
10842 d2h_r_idx_ptr
= ltoh32(ring_info
->d2h_r_idx_ptr
);
10843 h2d_w_idx_ptr
= ltoh32(ring_info
->h2d_w_idx_ptr
);
10844 h2d_r_idx_ptr
= ltoh32(ring_info
->h2d_r_idx_ptr
);
10846 if (HWA_CAPAB(bus
->dhd
)) {
10847 h2d_hwa_db_idx_ptr
= ltoh32(ring_info
->h2d_hwa_db_idx_ptr
);
10848 d2h_hwa_db_idx_ptr
= ltoh32(ring_info
->d2h_hwa_db_idx_ptr
);
10851 /* Store h2d common ring write/read pointers */
10852 for (i
= 0; i
< BCMPCIE_H2D_COMMON_MSGRINGS
; i
++) {
10853 bus
->ring_sh
[i
].ring_state_w
= h2d_w_idx_ptr
;
10854 bus
->ring_sh
[i
].ring_state_r
= h2d_r_idx_ptr
;
10856 /* update mem block */
10857 h2d_w_idx_ptr
= h2d_w_idx_ptr
+ bus
->rw_index_sz
;
10858 h2d_r_idx_ptr
= h2d_r_idx_ptr
+ bus
->rw_index_sz
;
10860 DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__
, i
,
10861 bus
->ring_sh
[i
].ring_state_w
, bus
->ring_sh
[i
].ring_state_r
));
10863 if (HWA_CAPAB(bus
->dhd
)) {
10864 /* Read HWA DB index value from TCM */
10865 bus
->ring_sh
[i
].ring_hwa_db_idx
=
10866 ltoh16(dhdpcie_bus_rtcm16(bus
, h2d_hwa_db_idx_ptr
));
10867 /* update mem block */
10868 h2d_hwa_db_idx_ptr
+= bus
->hwa_db_index_sz
;
10869 DHD_INFO(("h2d hwa: idx %d hw_db %x \n", i
,
10870 bus
->ring_sh
[i
].ring_hwa_db_idx
));
10874 /* Store d2h common ring write/read pointers */
10875 for (j
= 0; j
< BCMPCIE_D2H_COMMON_MSGRINGS
; j
++, i
++) {
10876 bus
->ring_sh
[i
].ring_state_w
= d2h_w_idx_ptr
;
10877 bus
->ring_sh
[i
].ring_state_r
= d2h_r_idx_ptr
;
10879 /* update mem block */
10880 d2h_w_idx_ptr
= d2h_w_idx_ptr
+ bus
->rw_index_sz
;
10881 d2h_r_idx_ptr
= d2h_r_idx_ptr
+ bus
->rw_index_sz
;
10883 DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__
, i
,
10884 bus
->ring_sh
[i
].ring_state_w
, bus
->ring_sh
[i
].ring_state_r
));
10886 if (HWA_CAPAB(bus
->dhd
)) {
10887 /* Read HWA DB index value from TCM */
10888 bus
->ring_sh
[i
].ring_hwa_db_idx
=
10889 ltoh16(dhdpcie_bus_rtcm16(bus
, d2h_hwa_db_idx_ptr
));
10890 /* update mem block */
10891 d2h_hwa_db_idx_ptr
+= bus
->hwa_db_index_sz
;
10892 DHD_INFO(("d2h hwa: idx %d hw_db %x \n", i
,
10893 bus
->ring_sh
[i
].ring_hwa_db_idx
));
10897 /* Store txflow ring write/read pointers */
10898 if (bus
->api
.fw_rev
< PCIE_SHARED_VERSION_6
) {
10899 max_tx_flowrings
-= BCMPCIE_H2D_COMMON_MSGRINGS
;
10901 /* Account for Debug info h2d ring located after the last tx flow ring */
10902 max_tx_flowrings
= max_tx_flowrings
+ 1;
10904 for (j
= 0; j
< max_tx_flowrings
; i
++, j
++)
10906 bus
->ring_sh
[i
].ring_state_w
= h2d_w_idx_ptr
;
10907 bus
->ring_sh
[i
].ring_state_r
= h2d_r_idx_ptr
;
10909 /* update mem block */
10910 h2d_w_idx_ptr
= h2d_w_idx_ptr
+ bus
->rw_index_sz
;
10911 h2d_r_idx_ptr
= h2d_r_idx_ptr
+ bus
->rw_index_sz
;
10913 DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
10915 bus
->ring_sh
[i
].ring_state_w
,
10916 bus
->ring_sh
[i
].ring_state_r
));
10918 if (HWA_CAPAB(bus
->dhd
)) {
10919 /* Read HWA DB index value from TCM */
10920 bus
->ring_sh
[i
].ring_hwa_db_idx
=
10921 ltoh16(dhdpcie_bus_rtcm16(bus
, h2d_hwa_db_idx_ptr
));
10922 /* update mem block */
10923 h2d_hwa_db_idx_ptr
+= bus
->hwa_db_index_sz
;
10924 DHD_INFO(("h2d hwa: idx %d hw_db %x \n", i
,
10925 bus
->ring_sh
[i
].ring_hwa_db_idx
));
10928 /* store wr/rd pointers for debug info completion or EDL ring */
10929 bus
->ring_sh
[i
].ring_state_w
= d2h_w_idx_ptr
;
10930 bus
->ring_sh
[i
].ring_state_r
= d2h_r_idx_ptr
;
10931 d2h_w_idx_ptr
= d2h_w_idx_ptr
+ bus
->rw_index_sz
;
10932 d2h_r_idx_ptr
= d2h_r_idx_ptr
+ bus
->rw_index_sz
;
10933 DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i
,
10934 bus
->ring_sh
[i
].ring_state_w
,
10935 bus
->ring_sh
[i
].ring_state_r
));
10937 if (HWA_CAPAB(bus
->dhd
)) {
10938 /* Read HWA DB index value from TCM */
10939 bus
->ring_sh
[i
].ring_hwa_db_idx
=
10940 ltoh16(dhdpcie_bus_rtcm16(bus
, d2h_hwa_db_idx_ptr
));
10941 /* update mem block */
10942 d2h_hwa_db_idx_ptr
+= bus
->hwa_db_index_sz
;
10943 DHD_INFO(("d2h hwa: idx %d hw_db %x \n", i
,
10944 bus
->ring_sh
[i
].ring_hwa_db_idx
));
10947 } /* dhd_fillup_ring_sharedptr_info */
10950 * Initialize bus module: prepare for communication with the dongle. Called after downloading
10951 * firmware into the dongle.
10953 int dhd_bus_init(dhd_pub_t
*dhdp
, bool enforce_mutex
)
10955 dhd_bus_t
*bus
= dhdp
->bus
;
10958 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
10964 dhd_bus_pcie_pwr_req_clear_reload_war(bus
);
10966 if (MULTIBP_ENAB(bus
->sih
)) {
10967 dhd_bus_pcie_pwr_req(bus
);
10970 /* Configure AER registers to log the TLP header */
10971 dhd_bus_aer_config(bus
);
10973 /* Make sure we're talking to the core. */
10974 bus
->reg
= si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
10975 ASSERT(bus
->reg
!= NULL
);
10977 /* before opening up bus for data transfer, check if shared are is intact */
10978 ret
= dhdpcie_readshared(bus
);
10980 DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__
));
10984 /* Make sure we're talking to the core. */
10985 bus
->reg
= si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0);
10986 ASSERT(bus
->reg
!= NULL
);
10988 /* Set bus state according to enable result */
10989 dhdp
->busstate
= DHD_BUS_DATA
;
10990 DHD_SET_BUS_NOT_IN_LPS(bus
);
10991 dhdp
->dhd_bus_busy_state
= 0;
10993 /* D11 status via PCIe completion header */
10994 if ((ret
= dhdpcie_init_d11status(bus
)) < 0) {
10998 if (!dhd_download_fw_on_driverload
)
10999 dhd_dpc_enable(bus
->dhd
);
11000 /* Enable the interrupt after device is up */
11001 dhdpcie_bus_intr_enable(bus
);
11003 DHD_ERROR(("%s: Enabling bus->intr_enabled\n", __FUNCTION__
));
11004 bus
->intr_enabled
= TRUE
;
11006 /* XXX These need to change w/API updates */
11007 /* bcmsdh_intr_unmask(bus->sdh); */
11008 #ifdef DHD_PCIE_RUNTIMEPM
11009 bus
->idlecount
= 0;
11010 bus
->idletime
= (int32
)MAX_IDLE_COUNT
;
11011 init_waitqueue_head(&bus
->rpm_queue
);
11012 mutex_init(&bus
->pm_lock
);
11015 #endif /* DHD_PCIE_RUNTIMEPM */
11017 bus
->skip_ds_ack
= FALSE
;
11018 /* Initialize the lock to serialize Device Wake Inband activities */
11019 if (!bus
->inb_lock
) {
11020 bus
->inb_lock
= osl_spin_lock_init(bus
->dhd
->osh
);
11024 /* XXX Temp errnum workaround: return ok, caller checks bus state */
11026 /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
11027 if (bus
->api
.fw_rev
< PCIE_SHARED_VERSION_6
) {
11028 bus
->use_d0_inform
= TRUE
;
11030 bus
->use_d0_inform
= FALSE
;
11033 bus
->hostready_count
= 0;
11036 if (MULTIBP_ENAB(bus
->sih
)) {
11037 dhd_bus_pcie_pwr_req_clear(bus
);
11043 dhdpcie_init_shared_addr(dhd_bus_t
*bus
)
11047 addr
= bus
->dongle_ram_base
+ bus
->ramsize
- 4;
11048 #ifdef DHD_PCIE_RUNTIMEPM
11049 dhdpcie_runtime_bus_wake(bus
->dhd
, TRUE
, __builtin_return_address(0));
11050 #endif /* DHD_PCIE_RUNTIMEPM */
11051 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&val
, sizeof(val
));
11055 dhdpcie_chipmatch(uint16 vendor
, uint16 device
)
11057 if (vendor
!= PCI_VENDOR_ID_BROADCOM
) {
11058 DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__
,
11064 case BCM4345_CHIP_ID
:
11065 case BCM43454_CHIP_ID
:
11066 case BCM43455_CHIP_ID
:
11067 case BCM43457_CHIP_ID
:
11068 case BCM43458_CHIP_ID
:
11069 case BCM4350_D11AC_ID
:
11070 case BCM4350_D11AC2G_ID
:
11071 case BCM4350_D11AC5G_ID
:
11072 case BCM4350_CHIP_ID
:
11073 case BCM4354_D11AC_ID
:
11074 case BCM4354_D11AC2G_ID
:
11075 case BCM4354_D11AC5G_ID
:
11076 case BCM4354_CHIP_ID
:
11077 case BCM4356_D11AC_ID
:
11078 case BCM4356_D11AC2G_ID
:
11079 case BCM4356_D11AC5G_ID
:
11080 case BCM4356_CHIP_ID
:
11081 case BCM4371_D11AC_ID
:
11082 case BCM4371_D11AC2G_ID
:
11083 case BCM4371_D11AC5G_ID
:
11084 case BCM4371_CHIP_ID
:
11085 case BCM4345_D11AC_ID
:
11086 case BCM4345_D11AC2G_ID
:
11087 case BCM4345_D11AC5G_ID
:
11088 case BCM43452_D11AC_ID
:
11089 case BCM43452_D11AC2G_ID
:
11090 case BCM43452_D11AC5G_ID
:
11091 case BCM4335_D11AC_ID
:
11092 case BCM4335_D11AC2G_ID
:
11093 case BCM4335_D11AC5G_ID
:
11094 case BCM4335_CHIP_ID
:
11095 case BCM43602_D11AC_ID
:
11096 case BCM43602_D11AC2G_ID
:
11097 case BCM43602_D11AC5G_ID
:
11098 case BCM43602_CHIP_ID
:
11099 case BCM43569_D11AC_ID
:
11100 case BCM43569_D11AC2G_ID
:
11101 case BCM43569_D11AC5G_ID
:
11102 case BCM43569_CHIP_ID
:
11103 /* XXX: For 4358, BCM4358_CHIP_ID is not checked intentionally as
11104 * this is not a real chip id, but propagated from the OTP.
11106 case BCM4358_D11AC_ID
:
11107 case BCM4358_D11AC2G_ID
:
11108 case BCM4358_D11AC5G_ID
:
11109 case BCM4349_D11AC_ID
:
11110 case BCM4349_D11AC2G_ID
:
11111 case BCM4349_D11AC5G_ID
:
11112 case BCM4355_D11AC_ID
:
11113 case BCM4355_D11AC2G_ID
:
11114 case BCM4355_D11AC5G_ID
:
11115 case BCM4355_CHIP_ID
:
11116 /* XXX: BCM4359_CHIP_ID is not checked intentionally as this is
11117 * not a real chip id, but propogated from the OTP.
11119 case BCM4359_D11AC_ID
:
11120 case BCM4359_D11AC2G_ID
:
11121 case BCM4359_D11AC5G_ID
:
11122 case BCM43596_D11AC_ID
:
11123 case BCM43596_D11AC2G_ID
:
11124 case BCM43596_D11AC5G_ID
:
11125 case BCM43597_D11AC_ID
:
11126 case BCM43597_D11AC2G_ID
:
11127 case BCM43597_D11AC5G_ID
:
11128 case BCM4364_D11AC_ID
:
11129 case BCM4364_D11AC2G_ID
:
11130 case BCM4364_D11AC5G_ID
:
11131 case BCM4364_CHIP_ID
:
11132 case BCM4361_D11AC_ID
:
11133 case BCM4361_D11AC2G_ID
:
11134 case BCM4361_D11AC5G_ID
:
11135 case BCM4361_CHIP_ID
:
11136 case BCM4347_D11AC_ID
:
11137 case BCM4347_D11AC2G_ID
:
11138 case BCM4347_D11AC5G_ID
:
11139 case BCM4347_CHIP_ID
:
11140 case BCM4369_D11AX_ID
:
11141 case BCM4369_D11AX2G_ID
:
11142 case BCM4369_D11AX5G_ID
:
11143 case BCM4369_CHIP_ID
:
11144 case BCM4376_D11AX_ID
:
11145 case BCM4376_D11AX2G_ID
:
11146 case BCM4376_D11AX5G_ID
:
11147 case BCM4376_CHIP_ID
:
11148 case BCM4377_M_D11AX_ID
:
11149 case BCM4377_D11AX_ID
:
11150 case BCM4377_D11AX2G_ID
:
11151 case BCM4377_D11AX5G_ID
:
11152 case BCM4377_CHIP_ID
:
11153 case BCM4378_D11AX_ID
:
11154 case BCM4378_D11AX2G_ID
:
11155 case BCM4378_D11AX5G_ID
:
11156 case BCM4378_CHIP_ID
:
11157 case BCM4387_D11AX_ID
:
11158 case BCM4387_CHIP_ID
:
11159 case BCM4368_D11AC_ID
:
11160 case BCM4368_D11AC2G_ID
:
11161 case BCM4368_D11AC5G_ID
:
11162 case BCM4368_CHIP_ID
:
11163 case BCM4362_D11AX_ID
:
11164 case BCM4362_D11AX2G_ID
:
11165 case BCM4362_D11AX5G_ID
:
11166 case BCM4362_CHIP_ID
:
11167 case BCM4375_D11AX_ID
:
11168 case BCM4375_D11AX2G_ID
:
11169 case BCM4375_D11AX5G_ID
:
11170 case BCM4375_CHIP_ID
:
11171 case BCM43751_D11AX_ID
:
11172 case BCM43751_D11AX2G_ID
:
11173 case BCM43751_D11AX5G_ID
:
11174 case BCM43751_CHIP_ID
:
11175 case BCM43752_D11AX_ID
:
11176 case BCM43752_D11AX2G_ID
:
11177 case BCM43752_D11AX5G_ID
:
11178 case BCM43752_CHIP_ID
:
11179 case BCM4388_CHIP_ID
:
11180 case BCM4388_D11AX_ID
:
11181 case BCM4389_CHIP_ID
:
11182 case BCM4389_D11AX_ID
:
11183 case BCM4385_D11AX_ID
:
11184 case BCM4385_CHIP_ID
:
11188 DHD_ERROR(("%s: Unsupported vendor %x device %x\n",
11189 __FUNCTION__
, vendor
, device
));
11192 } /* dhdpcie_chipmatch */
11195 * Name: dhdpcie_cc_nvmshadow
11198 * A shadow of OTP/SPROM exists in ChipCommon Region
11199 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
11200 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
11201 * can also be read from ChipCommon Registers.
11203 /* XXX So far tested with 4345 and 4350 (Hence the checks in the function.) */
11205 dhdpcie_cc_nvmshadow(dhd_bus_t
*bus
, struct bcmstrbuf
*b
)
11207 uint16 dump_offset
= 0;
11208 uint32 dump_size
= 0, otp_size
= 0, sprom_size
= 0;
11210 /* Table for 65nm OTP Size (in bits) */
11211 int otp_size_65nm
[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
11213 volatile uint16
*nvm_shadow
;
11216 uint chipc_corerev
;
11217 chipcregs_t
*chipcregs
;
11219 /* Save the current core */
11220 cur_coreid
= si_coreid(bus
->sih
);
11221 /* Switch to ChipC */
11222 chipcregs
= (chipcregs_t
*)si_setcore(bus
->sih
, CC_CORE_ID
, 0);
11223 ASSERT(chipcregs
!= NULL
);
11225 chipc_corerev
= si_corerev(bus
->sih
);
11227 /* Check ChipcommonCore Rev */
11228 if (chipc_corerev
< 44) {
11229 DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__
, chipc_corerev
));
11230 return BCME_UNSUPPORTED
;
11234 if (((uint16
)bus
->sih
->chip
!= BCM4350_CHIP_ID
) && !BCM4345_CHIP((uint16
)bus
->sih
->chip
) &&
11235 ((uint16
)bus
->sih
->chip
!= BCM4355_CHIP_ID
) &&
11236 ((uint16
)bus
->sih
->chip
!= BCM4364_CHIP_ID
)) {
11237 DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
11238 "4350/4345/4355/4364 only\n", __FUNCTION__
));
11239 return BCME_UNSUPPORTED
;
11242 /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
11243 if (chipcregs
->sromcontrol
& SRC_PRESENT
) {
11244 /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
11245 sprom_size
= (1 << (2 * ((chipcregs
->sromcontrol
& SRC_SIZE_MASK
)
11246 >> SRC_SIZE_SHIFT
))) * 1024;
11247 bcm_bprintf(b
, "\nSPROM Present (Size %d bits)\n", sprom_size
);
11250 /* XXX Check if OTP exists. 2 possible approaches:
11251 * 1) Check if OtpPresent in SpromCtrl (0x190 in ChipCommon Regs) is set OR
11252 * 2) Check if OtpSize > 0
11254 if (chipcregs
->sromcontrol
& SRC_OTPPRESENT
) {
11255 bcm_bprintf(b
, "\nOTP Present");
11257 if (((chipcregs
->otplayout
& OTPL_WRAP_TYPE_MASK
) >> OTPL_WRAP_TYPE_SHIFT
)
11258 == OTPL_WRAP_TYPE_40NM
) {
11259 /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
11260 /* Chipcommon rev51 is a variation on rev45 and does not support
11261 * the latest OTP configuration.
11263 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
11264 otp_size
= (((chipcregs
->otplayout
& OTPL_ROW_SIZE_MASK
)
11265 >> OTPL_ROW_SIZE_SHIFT
) + 1) * 1024;
11266 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
11268 otp_size
= (((chipcregs
->capabilities
& CC_CAP_OTPSIZE
)
11269 >> CC_CAP_OTPSIZE_SHIFT
) + 1) * 1024;
11270 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
11273 /* This part is untested since newer chips have 40nm OTP */
11274 /* Chipcommon rev51 is a variation on rev45 and does not support
11275 * the latest OTP configuration.
11277 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
11278 otp_size
= otp_size_65nm
[(chipcregs
->otplayout
& OTPL_ROW_SIZE_MASK
)
11279 >> OTPL_ROW_SIZE_SHIFT
];
11280 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
11282 otp_size
= otp_size_65nm
[(chipcregs
->capabilities
& CC_CAP_OTPSIZE
)
11283 >> CC_CAP_OTPSIZE_SHIFT
];
11284 bcm_bprintf(b
, "(Size %d bits)\n", otp_size
);
11285 DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
11291 /* Chipcommon rev51 is a variation on rev45 and does not support
11292 * the latest OTP configuration.
11294 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
11295 if (((chipcregs
->sromcontrol
& SRC_PRESENT
) == 0) &&
11296 ((chipcregs
->otplayout
& OTPL_ROW_SIZE_MASK
) == 0)) {
11297 DHD_ERROR(("%s: SPROM and OTP could not be found "
11298 "sromcontrol = %x, otplayout = %x \n",
11299 __FUNCTION__
, chipcregs
->sromcontrol
, chipcregs
->otplayout
));
11300 return BCME_NOTFOUND
;
11303 if (((chipcregs
->sromcontrol
& SRC_PRESENT
) == 0) &&
11304 ((chipcregs
->capabilities
& CC_CAP_OTPSIZE
) == 0)) {
11305 DHD_ERROR(("%s: SPROM and OTP could not be found "
11306 "sromcontrol = %x, capablities = %x \n",
11307 __FUNCTION__
, chipcregs
->sromcontrol
, chipcregs
->capabilities
));
11308 return BCME_NOTFOUND
;
11312 /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
11313 if ((!(chipcregs
->sromcontrol
& SRC_PRESENT
) || (chipcregs
->sromcontrol
& SRC_OTPSEL
)) &&
11314 (chipcregs
->sromcontrol
& SRC_OTPPRESENT
)) {
11316 bcm_bprintf(b
, "OTP Strap selected.\n"
11317 "\nOTP Shadow in ChipCommon:\n");
11319 dump_size
= otp_size
/ 16 ; /* 16bit words */
11321 } else if (((chipcregs
->sromcontrol
& SRC_OTPSEL
) == 0) &&
11322 (chipcregs
->sromcontrol
& SRC_PRESENT
)) {
11324 bcm_bprintf(b
, "SPROM Strap selected\n"
11325 "\nSPROM Shadow in ChipCommon:\n");
11327 /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
11328 /* dump_size in 16bit words */
11329 dump_size
= sprom_size
> 8 ? (8 * 1024) / 16 : sprom_size
/ 16;
11331 DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
11333 return BCME_NOTFOUND
;
11336 if (bus
->regs
== NULL
) {
11337 DHD_ERROR(("ChipCommon Regs. not initialized\n"));
11338 return BCME_NOTREADY
;
11340 bcm_bprintf(b
, "\n OffSet:");
11342 /* Chipcommon rev51 is a variation on rev45 and does not support
11343 * the latest OTP configuration.
11345 if (chipc_corerev
!= 51 && chipc_corerev
>= 49) {
11346 /* Chip common can read only 8kbits,
11347 * for ccrev >= 49 otp size is around 12 kbits so use GCI core
11349 nvm_shadow
= (volatile uint16
*)si_setcore(bus
->sih
, GCI_CORE_ID
, 0);
11351 /* Point to the SPROM/OTP shadow in ChipCommon */
11352 nvm_shadow
= chipcregs
->sromotp
;
11355 if (nvm_shadow
== NULL
) {
11356 DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__
));
11357 return BCME_NOTFOUND
;
11361 * Read 16 bits / iteration.
11362 * dump_size & dump_offset in 16-bit words
11364 while (dump_offset
< dump_size
) {
11365 if (dump_offset
% 2 == 0)
11366 /* Print the offset in the shadow space in Bytes */
11367 bcm_bprintf(b
, "\n 0x%04x", dump_offset
* 2);
11369 bcm_bprintf(b
, "\t0x%04x", *(nvm_shadow
+ dump_offset
));
11370 dump_offset
+= 0x1;
11374 /* Switch back to the original core */
11375 si_setcore(bus
->sih
, cur_coreid
, 0);
11378 } /* dhdpcie_cc_nvmshadow */
11380 /** Flow rings are dynamically created and destroyed */
11381 void dhd_bus_clean_flow_ring(dhd_bus_t
*bus
, void *node
)
11384 flow_queue_t
*queue
;
11385 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)node
;
11386 unsigned long flags
;
11388 queue
= &flow_ring_node
->queue
;
11390 #ifdef DHDTCPACK_SUPPRESS
11391 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
11392 * when there is a newly coming packet from network stack.
11394 dhd_tcpack_info_tbl_clean(bus
->dhd
);
11395 #endif /* DHDTCPACK_SUPPRESS */
11397 /* clean up BUS level info */
11398 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
11400 /* Flush all pending packets in the queue, if any */
11401 while ((pkt
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
11402 PKTFREE(bus
->dhd
->osh
, pkt
, TRUE
);
11404 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
11406 /* Reinitialise flowring's queue */
11407 dhd_flow_queue_reinit(bus
->dhd
, queue
, FLOW_RING_QUEUE_THRESHOLD
);
11408 flow_ring_node
->status
= FLOW_RING_STATUS_CLOSED
;
11409 flow_ring_node
->active
= FALSE
;
11411 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
11413 /* Hold flowring_list_lock to ensure no race condition while accessing the List */
11414 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
11415 dll_delete(&flow_ring_node
->list
);
11416 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
11418 /* Release the flowring object back into the pool */
11419 dhd_prot_flowrings_pool_release(bus
->dhd
,
11420 flow_ring_node
->flowid
, flow_ring_node
->prot_info
);
11422 /* Free the flowid back to the flowid allocator */
11423 dhd_flowid_free(bus
->dhd
, flow_ring_node
->flow_info
.ifindex
,
11424 flow_ring_node
->flowid
);
11428 * Allocate a Flow ring buffer,
11429 * Init Ring buffer, send Msg to device about flow ring creation
11432 dhd_bus_flow_ring_create_request(dhd_bus_t
*bus
, void *arg
)
11434 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)arg
;
11436 DHD_INFO(("%s :Flow create\n", __FUNCTION__
));
11438 /* Send Msg to device about flow ring creation */
11439 if (dhd_prot_flow_ring_create(bus
->dhd
, flow_ring_node
) != BCME_OK
)
11445 /** Handle response from dongle on a 'flow ring create' request */
11447 dhd_bus_flow_ring_create_response(dhd_bus_t
*bus
, uint16 flowid
, int32 status
)
11449 flow_ring_node_t
*flow_ring_node
;
11450 unsigned long flags
;
11452 DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__
, flowid
));
11454 /* Boundary check of the flowid */
11455 if (flowid
> bus
->dhd
->max_tx_flowid
) {
11456 DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__
,
11457 flowid
, bus
->dhd
->max_tx_flowid
));
11461 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
11462 if (!flow_ring_node
) {
11463 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__
));
11467 ASSERT(flow_ring_node
->flowid
== flowid
);
11468 if (flow_ring_node
->flowid
!= flowid
) {
11469 DHD_ERROR(("%s: flowid %d is different from the flowid "
11470 "of the flow_ring_node %d\n", __FUNCTION__
, flowid
,
11471 flow_ring_node
->flowid
));
11475 if (status
!= BCME_OK
) {
11476 DHD_ERROR(("%s Flow create Response failure error status = %d \n",
11477 __FUNCTION__
, status
));
11478 /* Call Flow clean up */
11479 dhd_bus_clean_flow_ring(bus
, flow_ring_node
);
11483 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
11484 flow_ring_node
->status
= FLOW_RING_STATUS_OPEN
;
11485 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
11487 /* Now add the Flow ring node into the active list
11488 * Note that this code to add the newly created node to the active
11489 * list was living in dhd_flowid_lookup. But note that after
11490 * adding the node to the active list the contents of node is being
11491 * filled in dhd_prot_flow_ring_create.
11492 * If there is a D2H interrupt after the node gets added to the
11493 * active list and before the node gets populated with values
11494 * from the Bottom half dhd_update_txflowrings would be called.
11495 * which will then try to walk through the active flow ring list,
11496 * pickup the nodes and operate on them. Now note that since
11497 * the function dhd_prot_flow_ring_create is not finished yet
11498 * the contents of flow_ring_node can still be NULL leading to
11499 * crashes. Hence the flow_ring_node should be added to the
11500 * active list only after its truely created, which is after
11501 * receiving the create response message from the Host.
11503 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
11504 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
11505 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
11507 dhd_bus_schedule_queue(bus
, flowid
, FALSE
); /* from queue to flowring */
11513 dhd_bus_flow_ring_delete_request(dhd_bus_t
*bus
, void *arg
)
11516 flow_queue_t
*queue
;
11517 flow_ring_node_t
*flow_ring_node
;
11518 unsigned long flags
;
11520 DHD_INFO(("%s :Flow Delete\n", __FUNCTION__
));
11522 flow_ring_node
= (flow_ring_node_t
*)arg
;
11524 #ifdef DHDTCPACK_SUPPRESS
11525 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
11526 * when there is a newly coming packet from network stack.
11528 dhd_tcpack_info_tbl_clean(bus
->dhd
);
11529 #endif /* DHDTCPACK_SUPPRESS */
11530 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
11531 if (flow_ring_node
->status
== FLOW_RING_STATUS_DELETE_PENDING
) {
11532 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
11533 DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__
, flow_ring_node
->flowid
));
11536 flow_ring_node
->status
= FLOW_RING_STATUS_DELETE_PENDING
;
11538 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
11540 /* Flush all pending packets in the queue, if any */
11541 while ((pkt
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
11542 PKTFREE(bus
->dhd
->osh
, pkt
, TRUE
);
11544 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
11546 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
11548 /* Send Msg to device about flow ring deletion */
11549 dhd_prot_flow_ring_delete(bus
->dhd
, flow_ring_node
);
11555 dhd_bus_flow_ring_delete_response(dhd_bus_t
*bus
, uint16 flowid
, uint32 status
)
11557 flow_ring_node_t
*flow_ring_node
;
11559 DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__
, flowid
));
11561 /* Boundary check of the flowid */
11562 if (flowid
> bus
->dhd
->max_tx_flowid
) {
11563 DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__
,
11564 flowid
, bus
->dhd
->max_tx_flowid
));
11568 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
11569 if (!flow_ring_node
) {
11570 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__
));
11574 ASSERT(flow_ring_node
->flowid
== flowid
);
11575 if (flow_ring_node
->flowid
!= flowid
) {
11576 DHD_ERROR(("%s: flowid %d is different from the flowid "
11577 "of the flow_ring_node %d\n", __FUNCTION__
, flowid
,
11578 flow_ring_node
->flowid
));
11582 if (status
!= BCME_OK
) {
11583 DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
11584 __FUNCTION__
, status
));
11587 /* Call Flow clean up */
11588 dhd_bus_clean_flow_ring(bus
, flow_ring_node
);
11594 int dhd_bus_flow_ring_flush_request(dhd_bus_t
*bus
, void *arg
)
11597 flow_queue_t
*queue
;
11598 flow_ring_node_t
*flow_ring_node
;
11599 unsigned long flags
;
11601 DHD_INFO(("%s :Flow Flush\n", __FUNCTION__
));
11603 flow_ring_node
= (flow_ring_node_t
*)arg
;
11605 DHD_FLOWRING_LOCK(flow_ring_node
->lock
, flags
);
11606 queue
= &flow_ring_node
->queue
; /* queue associated with flow ring */
11607 /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
11608 * once flow ring flush response is received for this flowring node.
11610 flow_ring_node
->status
= FLOW_RING_STATUS_FLUSH_PENDING
;
11612 #ifdef DHDTCPACK_SUPPRESS
11613 /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
11614 * when there is a newly coming packet from network stack.
11616 dhd_tcpack_info_tbl_clean(bus
->dhd
);
11617 #endif /* DHDTCPACK_SUPPRESS */
11619 /* Flush all pending packets in the queue, if any */
11620 while ((pkt
= dhd_flow_queue_dequeue(bus
->dhd
, queue
)) != NULL
) {
11621 PKTFREE(bus
->dhd
->osh
, pkt
, TRUE
);
11623 ASSERT(DHD_FLOW_QUEUE_EMPTY(queue
));
11625 DHD_FLOWRING_UNLOCK(flow_ring_node
->lock
, flags
);
11627 /* Send Msg to device about flow ring flush */
11628 dhd_prot_flow_ring_flush(bus
->dhd
, flow_ring_node
);
11634 dhd_bus_flow_ring_flush_response(dhd_bus_t
*bus
, uint16 flowid
, uint32 status
)
11636 flow_ring_node_t
*flow_ring_node
;
11638 if (status
!= BCME_OK
) {
11639 DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
11640 __FUNCTION__
, status
));
11644 /* Boundary check of the flowid */
11645 if (flowid
> bus
->dhd
->max_tx_flowid
) {
11646 DHD_ERROR(("%s: flowid is invalid %d, max id %d\n", __FUNCTION__
,
11647 flowid
, bus
->dhd
->max_tx_flowid
));
11651 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
11652 if (!flow_ring_node
) {
11653 DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__
));
11657 ASSERT(flow_ring_node
->flowid
== flowid
);
11658 if (flow_ring_node
->flowid
!= flowid
) {
11659 DHD_ERROR(("%s: flowid %d is different from the flowid "
11660 "of the flow_ring_node %d\n", __FUNCTION__
, flowid
,
11661 flow_ring_node
->flowid
));
11665 flow_ring_node
->status
= FLOW_RING_STATUS_OPEN
;
11670 dhd_bus_max_h2d_queues(struct dhd_bus
*bus
)
11672 return bus
->max_submission_rings
;
11675 /* To be symmetric with SDIO */
11677 dhd_bus_pktq_flush(dhd_pub_t
*dhdp
)
11683 dhd_bus_set_linkdown(dhd_pub_t
*dhdp
, bool val
)
11685 dhdp
->bus
->is_linkdown
= val
;
11689 dhd_bus_get_linkdown(dhd_pub_t
*dhdp
)
11691 return dhdp
->bus
->is_linkdown
;
11695 dhd_bus_get_cto(dhd_pub_t
*dhdp
)
11697 return dhdp
->bus
->cto_triggered
;
11700 #ifdef IDLE_TX_FLOW_MGMT
11701 /* resume request */
11703 dhd_bus_flow_ring_resume_request(dhd_bus_t
*bus
, void *arg
)
11705 flow_ring_node_t
*flow_ring_node
= (flow_ring_node_t
*)arg
;
11707 DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__
, flow_ring_node
->flowid
));
11709 flow_ring_node
->status
= FLOW_RING_STATUS_RESUME_PENDING
;
11711 /* Send Msg to device about flow ring resume */
11712 dhd_prot_flow_ring_resume(bus
->dhd
, flow_ring_node
);
11717 /* add the node back to active flowring */
11719 dhd_bus_flow_ring_resume_response(dhd_bus_t
*bus
, uint16 flowid
, int32 status
)
11722 flow_ring_node_t
*flow_ring_node
;
11724 DHD_TRACE(("%s :flowid %d \n", __FUNCTION__
, flowid
));
11726 flow_ring_node
= DHD_FLOW_RING(bus
->dhd
, flowid
);
11727 ASSERT(flow_ring_node
->flowid
== flowid
);
11729 if (status
!= BCME_OK
) {
11730 DHD_ERROR(("%s Error Status = %d \n",
11731 __FUNCTION__
, status
));
11735 DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
11736 __FUNCTION__
, flow_ring_node
->flowid
, flow_ring_node
->queue
.len
));
11738 flow_ring_node
->status
= FLOW_RING_STATUS_OPEN
;
11740 dhd_bus_schedule_queue(bus
, flowid
, FALSE
);
11744 /* scan the flow rings in active list for idle time out */
11746 dhd_bus_check_idle_scan(dhd_bus_t
*bus
)
11748 uint64 time_stamp
; /* in millisec */
11751 time_stamp
= OSL_SYSUPTIME();
11752 diff
= time_stamp
- bus
->active_list_last_process_ts
;
11754 if (diff
> IDLE_FLOW_LIST_TIMEOUT
) {
11755 dhd_bus_idle_scan(bus
);
11756 bus
->active_list_last_process_ts
= OSL_SYSUPTIME();
11762 /* scan the nodes in active list till it finds a non idle node */
11764 dhd_bus_idle_scan(dhd_bus_t
*bus
)
11766 dll_t
*item
, *prev
;
11767 flow_ring_node_t
*flow_ring_node
;
11768 uint64 time_stamp
, diff
;
11769 unsigned long flags
;
11770 uint16 ringid
[MAX_SUSPEND_REQ
];
11773 time_stamp
= OSL_SYSUPTIME();
11774 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
11776 for (item
= dll_tail_p(&bus
->flowring_active_list
);
11777 !dll_end(&bus
->flowring_active_list
, item
); item
= prev
) {
11778 prev
= dll_prev_p(item
);
11780 flow_ring_node
= dhd_constlist_to_flowring(item
);
11782 if (flow_ring_node
->flowid
== (bus
->max_submission_rings
- 1))
11785 if (flow_ring_node
->status
!= FLOW_RING_STATUS_OPEN
) {
11786 /* Takes care of deleting zombie rings */
11787 /* delete from the active list */
11788 DHD_INFO(("deleting flow id %u from active list\n",
11789 flow_ring_node
->flowid
));
11790 __dhd_flow_ring_delete_from_active_list(bus
, flow_ring_node
);
11794 diff
= time_stamp
- flow_ring_node
->last_active_ts
;
11796 if ((diff
> IDLE_FLOW_RING_TIMEOUT
) && !(flow_ring_node
->queue
.len
)) {
11797 DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node
->flowid
));
11798 /* delete from the active list */
11799 __dhd_flow_ring_delete_from_active_list(bus
, flow_ring_node
);
11800 flow_ring_node
->status
= FLOW_RING_STATUS_SUSPENDED
;
11801 ringid
[count
] = flow_ring_node
->flowid
;
11803 if (count
== MAX_SUSPEND_REQ
) {
11804 /* create a batch message now!! */
11805 dhd_prot_flow_ring_batch_suspend_request(bus
->dhd
, ringid
, count
);
11811 /* No more scanning, break from here! */
11817 dhd_prot_flow_ring_batch_suspend_request(bus
->dhd
, ringid
, count
);
11820 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
11825 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
11827 unsigned long flags
;
11830 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
11831 /* check if the node is already at head, otherwise delete it and prepend */
11832 list
= dll_head_p(&bus
->flowring_active_list
);
11833 if (&flow_ring_node
->list
!= list
) {
11834 dll_delete(&flow_ring_node
->list
);
11835 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
11838 /* update flow ring timestamp */
11839 flow_ring_node
->last_active_ts
= OSL_SYSUPTIME();
11841 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
11846 void dhd_flow_ring_add_to_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
11848 unsigned long flags
;
11850 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
11852 dll_prepend(&bus
->flowring_active_list
, &flow_ring_node
->list
);
11853 /* update flow ring timestamp */
11854 flow_ring_node
->last_active_ts
= OSL_SYSUPTIME();
11856 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
11860 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
11862 dll_delete(&flow_ring_node
->list
);
11865 void dhd_flow_ring_delete_from_active_list(struct dhd_bus
*bus
, flow_ring_node_t
*flow_ring_node
)
11867 unsigned long flags
;
11869 DHD_FLOWRING_LIST_LOCK(bus
->dhd
->flowring_list_lock
, flags
);
11871 __dhd_flow_ring_delete_from_active_list(bus
, flow_ring_node
);
11873 DHD_FLOWRING_LIST_UNLOCK(bus
->dhd
->flowring_list_lock
, flags
);
11877 #endif /* IDLE_TX_FLOW_MGMT */
/** Thin wrapper: power up / start the host-side PCIe device. */
int
dhdpcie_bus_start_host_dev(struct dhd_bus *bus)
{
	return dhdpcie_start_host_dev(bus);
}
/** Thin wrapper: power down / stop the host-side PCIe device. */
int
dhdpcie_bus_stop_host_dev(struct dhd_bus *bus)
{
	return dhdpcie_stop_host_dev(bus);
}
/** Thin wrapper: disable the PCIe device. */
int
dhdpcie_bus_disable_device(struct dhd_bus *bus)
{
	return dhdpcie_disable_device(bus);
}
/** Thin wrapper: enable the PCIe device. */
int
dhdpcie_bus_enable_device(struct dhd_bus *bus)
{
	return dhdpcie_enable_device(bus);
}
/** Thin wrapper: allocate bus resources (BAR mappings etc.). */
int
dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
{
	return dhdpcie_alloc_resource(bus);
}
/** Thin wrapper: release bus resources acquired by dhdpcie_bus_alloc_resource(). */
void
dhdpcie_bus_free_resource(struct dhd_bus *bus)
{
	dhdpcie_free_resource(bus);
}
/** Thin wrapper: request the PCIe interrupt for this bus. */
int
dhd_bus_request_irq(struct dhd_bus *bus)
{
	return dhdpcie_bus_request_irq(bus);
}
/** Thin wrapper: attach the dongle on this bus. */
int
dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
{
	return dhdpcie_dongle_attach(bus);
}
11928 dhd_bus_release_dongle(struct dhd_bus
*bus
)
11930 bool dongle_isolation
;
11933 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
11940 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
11942 #endif /* DEBUGGER || DHD_DSCOPE */
11944 dongle_isolation
= bus
->dhd
->dongle_isolation
;
11945 dhdpcie_bus_release_dongle(bus
, osh
, dongle_isolation
, TRUE
);
11953 dhdpcie_cto_cfg_init(struct dhd_bus
*bus
, bool enable
)
11956 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4,
11957 PCI_CTO_INT_MASK
| PCI_SBIM_MASK_SERR
);
11959 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4, 0);
11965 dhdpcie_cto_init(struct dhd_bus
*bus
, bool enable
)
11967 volatile void *regsva
= (volatile void *)bus
->regs
;
11969 uint16 chipid
= dhd_get_chipid(bus
);
11972 bus
->cto_enable
= enable
;
11974 dhdpcie_cto_cfg_init(bus
, enable
);
11977 if (bus
->cto_threshold
== 0) {
11978 if ((chipid
== BCM4387_CHIP_ID
) ||
11979 (chipid
== BCM4388_CHIP_ID
) ||
11980 (chipid
== BCM4389_CHIP_ID
)) {
11981 bus
->cto_threshold
= PCIE_CTO_TO_THRESH_DEFAULT_REV69
;
11983 bus
->cto_threshold
= PCIE_CTO_TO_THRESH_DEFAULT
;
11986 val
= ((bus
->cto_threshold
<< PCIE_CTO_TO_THRESHOLD_SHIFT
) &
11987 PCIE_CTO_TO_THRESHHOLD_MASK
) |
11988 ((PCIE_CTO_CLKCHKCNT_VAL
<< PCIE_CTO_CLKCHKCNT_SHIFT
) &
11989 PCIE_CTO_CLKCHKCNT_MASK
) |
11990 PCIE_CTO_ENAB_MASK
;
11992 pcie_corereg(bus
->osh
, regsva
, OFFSETOF(sbpcieregs_t
, ctoctrl
), ~0, val
);
11994 pcie_corereg(bus
->osh
, regsva
, OFFSETOF(sbpcieregs_t
, ctoctrl
), ~0, 0);
11997 ctoctrl
= pcie_corereg(bus
->osh
, regsva
, OFFSETOF(sbpcieregs_t
, ctoctrl
), 0, 0);
11999 DHD_ERROR(("%s: ctoctrl(0x%x) enable/disable %d for chipid(0x%x)\n",
12000 __FUNCTION__
, ctoctrl
, bus
->cto_enable
, chipid
));
12006 dhdpcie_cto_error_recovery(struct dhd_bus
*bus
)
12008 uint32 pci_intmask
, err_status
;
12012 pci_intmask
= dhdpcie_bus_cfg_read_dword(bus
, PCI_INT_MASK
, 4);
12013 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_MASK
, 4, pci_intmask
& ~PCI_CTO_INT_MASK
);
12015 DHD_OS_WAKE_LOCK(bus
->dhd
);
12017 DHD_ERROR(("--- CTO Triggered --- %d\n", bus
->pwr_req_ref
));
12020 * DAR still accessible
12022 dhd_bus_dump_dar_registers(bus
);
12024 /* reset backplane */
12025 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
12026 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
| SPROM_CFG_TO_SB_RST
);
12028 /* clear timeout error */
12030 err_status
= si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
12031 DAR_ERRLOG(bus
->sih
->buscorerev
),
12033 if (err_status
& PCIE_CTO_ERR_MASK
) {
12034 si_corereg(bus
->sih
, bus
->sih
->buscoreidx
,
12035 DAR_ERRLOG(bus
->sih
->buscorerev
),
12036 ~0, PCIE_CTO_ERR_MASK
);
12040 OSL_DELAY(CTO_TO_CLEAR_WAIT_MS
* 1000);
12042 if (i
> CTO_TO_CLEAR_WAIT_MAX_CNT
) {
12043 DHD_ERROR(("cto recovery fail\n"));
12045 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
12050 /* clear interrupt status */
12051 dhdpcie_bus_cfg_write_dword(bus
, PCI_INT_STATUS
, 4, PCI_CTO_INT_MASK
);
12053 /* Halt ARM & remove reset */
12054 /* TBD : we can add ARM Halt here in case */
12056 /* reset SPROM_CFG_TO_SB_RST */
12057 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
12059 DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
12060 PCI_SPROM_CONTROL
, SPROM_CFG_TO_SB_RST
, val
));
12061 dhdpcie_bus_cfg_write_dword(bus
, PCI_SPROM_CONTROL
, 4, val
& ~SPROM_CFG_TO_SB_RST
);
12063 val
= dhdpcie_bus_cfg_read_dword(bus
, PCI_SPROM_CONTROL
, 4);
12064 DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
12065 PCI_SPROM_CONTROL
, SPROM_CFG_TO_SB_RST
, val
));
12067 DHD_OS_WAKE_UNLOCK(bus
->dhd
);
12073 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus
*bus
)
12077 val
= dhdpcie_bus_cfg_read_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4);
12078 dhdpcie_bus_cfg_write_dword(bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4,
12079 val
| (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT
));
12082 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
12084 * XXX: WAR: Update dongle that driver supports sending of d11
12085 * tx_status through unused status field of PCIe completion header
12086 * if dongle also supports the same WAR.
12089 dhdpcie_init_d11status(struct dhd_bus
*bus
)
12095 if (bus
->pcie_sh
->flags2
& PCIE_SHARED2_D2H_D11_TX_STATUS
) {
12096 flags2
= bus
->pcie_sh
->flags2
;
12097 addr
= bus
->shared_addr
+ OFFSETOF(pciedev_shared_t
, flags2
);
12098 flags2
|= PCIE_SHARED2_H2D_D11_TX_STATUS
;
12099 ret
= dhdpcie_bus_membytes(bus
, TRUE
, addr
,
12100 (uint8
*)&flags2
, sizeof(flags2
));
12102 DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
12106 bus
->pcie_sh
->flags2
= flags2
;
12107 bus
->dhd
->d11_tx_status
= TRUE
;
12114 dhdpcie_init_d11status(struct dhd_bus
*bus
)
12118 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
12121 dhd_bus_oob_intr_register(dhd_pub_t
*dhdp
)
12124 #ifdef BCMPCIE_OOB_HOST_WAKE
12125 err
= dhdpcie_oob_intr_register(dhdp
->bus
);
12126 #endif /* BCMPCIE_OOB_HOST_WAKE */
12131 dhd_bus_oob_intr_unregister(dhd_pub_t
*dhdp
)
12133 #ifdef BCMPCIE_OOB_HOST_WAKE
12134 dhdpcie_oob_intr_unregister(dhdp
->bus
);
12135 #endif /* BCMPCIE_OOB_HOST_WAKE */
12139 dhd_bus_oob_intr_set(dhd_pub_t
*dhdp
, bool enable
)
12141 #ifdef BCMPCIE_OOB_HOST_WAKE
12142 dhdpcie_oob_intr_set(dhdp
->bus
, enable
);
12143 #endif /* BCMPCIE_OOB_HOST_WAKE */
12147 dhd_bus_get_oob_irq_num(dhd_pub_t
*dhdp
)
12150 #ifdef BCMPCIE_OOB_HOST_WAKE
12151 irq_num
= dhdpcie_get_oob_irq_num(dhdp
->bus
);
12152 #endif /* BCMPCIE_OOB_HOST_WAKE */
12157 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t
*bus
)
12159 return bus
->dhd
->d2h_hostrdy_supported
;
12163 dhd_pcie_dump_core_regs(dhd_pub_t
* pub
, uint32 index
, uint32 first_addr
, uint32 last_addr
)
12165 dhd_bus_t
*bus
= pub
->bus
;
12166 uint32 coreoffset
= index
<< 12;
12167 uint32 core_addr
= SI_ENUM_BASE(bus
->sih
) + coreoffset
;
12170 while (first_addr
<= last_addr
) {
12171 core_addr
= SI_ENUM_BASE(bus
->sih
) + coreoffset
+ first_addr
;
12172 if (serialized_backplane_access(bus
, core_addr
, 4, &value
, TRUE
) != BCME_OK
) {
12173 DHD_ERROR(("Invalid size/addr combination \n"));
12175 DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr
, value
));
12176 first_addr
= first_addr
+ 4;
12181 dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t
*bus
)
12185 } else if (bus
->hwa_enabled
) {
12186 return bus
->dhd
->hwa_capable
;
12193 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t
*bus
)
12197 else if (bus
->idma_enabled
) {
12198 return bus
->dhd
->idma_enable
;
12205 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t
*bus
)
12209 else if (bus
->ifrm_enabled
) {
12210 return bus
->dhd
->ifrm_enable
;
12217 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t
*bus
)
12221 } else if (bus
->dar_enabled
) {
12222 return bus
->dhd
->dar_enable
;
12229 dhdpcie_bus_enab_pcie_dw(dhd_bus_t
*bus
, uint8 dw_option
)
12231 DHD_ERROR(("ENABLING DW:%d\n", dw_option
));
12232 bus
->dw_option
= dw_option
;
12237 dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t
*bus
)
12241 if (bus
->inb_enabled
) {
12242 return bus
->dhd
->d2h_inband_dw
;
12249 dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t
*bus
, enum dhd_bus_ds_state state
)
12251 if (!INBAND_DW_ENAB(bus
))
12254 DHD_INFO(("%s:%d\n", __FUNCTION__
, state
));
12255 bus
->dhd
->ds_state
= state
;
12256 if (state
== DW_DEVICE_DS_DISABLED_WAIT
|| state
== DW_DEVICE_DS_D3_INFORM_WAIT
) {
12257 bus
->ds_exit_timeout
= 100;
12259 if (state
== DW_DEVICE_HOST_WAKE_WAIT
) {
12260 bus
->host_sleep_exit_timeout
= 100;
12262 if (state
== DW_DEVICE_DS_DEV_WAKE
) {
12263 bus
->ds_exit_timeout
= 0;
12265 if (state
== DW_DEVICE_DS_ACTIVE
) {
12266 bus
->host_sleep_exit_timeout
= 0;
12270 enum dhd_bus_ds_state
12271 dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t
*bus
)
12273 if (!INBAND_DW_ENAB(bus
))
12274 return DW_DEVICE_DS_INVALID
;
12275 return bus
->dhd
->ds_state
;
12277 #endif /* PCIE_INB_DW */
12281 dhd_bus_ds_trace(dhd_bus_t
*bus
, uint32 dsval
, bool d2h
, enum dhd_bus_ds_state inbstate
)
12283 dhd_bus_ds_trace(dhd_bus_t
*bus
, uint32 dsval
, bool d2h
)
12284 #endif /* PCIE_INB_DW */
12286 uint32 cnt
= bus
->ds_trace_count
% MAX_DS_TRACE_SIZE
;
12288 bus
->ds_trace
[cnt
].timestamp
= OSL_LOCALTIME_NS();
12289 bus
->ds_trace
[cnt
].d2h
= d2h
;
12290 bus
->ds_trace
[cnt
].dsval
= dsval
;
12292 bus
->ds_trace
[cnt
].inbstate
= inbstate
;
12293 #endif /* PCIE_INB_DW */
12294 bus
->ds_trace_count
++;
12299 dhd_convert_dsval(uint32 val
, bool d2h
)
12303 case D2H_DEV_D3_ACK
:
12304 return "D2H_DEV_D3_ACK";
12305 case D2H_DEV_DS_ENTER_REQ
:
12306 return "D2H_DEV_DS_ENTER_REQ";
12307 case D2H_DEV_DS_EXIT_NOTE
:
12308 return "D2H_DEV_DS_EXIT_NOTE";
12309 case D2H_DEV_FWHALT
:
12310 return "D2H_DEV_FWHALT";
12311 case D2HMB_DS_HOST_SLEEP_EXIT_ACK
:
12312 return "D2HMB_DS_HOST_SLEEP_EXIT_ACK";
12318 case H2DMB_DS_DEVICE_WAKE_DEASSERT
:
12319 return "H2DMB_DS_DEVICE_WAKE_DEASSERT";
12320 case H2DMB_DS_DEVICE_WAKE_ASSERT
:
12321 return "H2DMB_DS_DEVICE_WAKE_ASSERT";
12322 case H2D_HOST_D3_INFORM
:
12323 return "H2D_HOST_D3_INFORM";
12324 case H2D_HOST_DS_ACK
:
12325 return "H2D_HOST_DS_ACK";
12326 case H2D_HOST_DS_NAK
:
12327 return "H2D_HOST_DS_NAK";
12328 case H2D_HOST_CONS_INT
:
12329 return "H2D_HOST_CONS_INT";
12331 return "H2D_FW_TRAP";
12339 dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate
)
12341 switch (inbstate
) {
12342 case DW_DEVICE_DS_DEV_SLEEP
:
12343 return "DW_DEVICE_DS_DEV_SLEEP";
12345 case DW_DEVICE_DS_DISABLED_WAIT
:
12346 return "DW_DEVICE_DS_DISABLED_WAIT";
12348 case DW_DEVICE_DS_DEV_WAKE
:
12349 return "DW_DEVICE_DS_DEV_WAKE";
12351 case DW_DEVICE_DS_ACTIVE
:
12352 return "DW_DEVICE_DS_ACTIVE";
12354 case DW_DEVICE_HOST_SLEEP_WAIT
:
12355 return "DW_DEVICE_HOST_SLEEP_WAIT";
12357 case DW_DEVICE_HOST_SLEEP
:
12358 return "DW_DEVICE_HOST_SLEEP";
12360 case DW_DEVICE_HOST_WAKE_WAIT
:
12361 return "DW_DEVICE_HOST_WAKE_WAIT";
12363 case DW_DEVICE_DS_D3_INFORM_WAIT
:
12364 return "DW_DEVICE_DS_D3_INFORM_WAIT";
12370 #endif /* PCIE_INB_DW */
12373 dhd_dump_bus_ds_trace(dhd_bus_t
*bus
, struct bcmstrbuf
*strbuf
)
12378 dumpsz
= bus
->ds_trace_count
< MAX_DS_TRACE_SIZE
?
12379 bus
->ds_trace_count
: MAX_DS_TRACE_SIZE
;
12381 bcm_bprintf(strbuf
, "\nEmpty DS TRACE\n");
12384 bcm_bprintf(strbuf
, "---- DS TRACE ------\n");
12386 bcm_bprintf(strbuf
, "%s\t\t%s\t%-30s\t\t%s\n",
12387 "Timestamp us", "Dir", "Value", "Inband-State");
12388 for (i
= 0; i
< dumpsz
; i
++) {
12389 bcm_bprintf(strbuf
, "%llu\t%s\t%-30s\t\t%s\n",
12390 bus
->ds_trace
[i
].timestamp
,
12391 bus
->ds_trace
[i
].d2h
? "D2H":"H2D",
12392 dhd_convert_dsval(bus
->ds_trace
[i
].dsval
, bus
->ds_trace
[i
].d2h
),
12393 dhd_convert_inb_state_names(bus
->ds_trace
[i
].inbstate
));
12396 bcm_bprintf(strbuf
, "Timestamp us\t\tDir\tValue\n");
12397 for (i
= 0; i
< dumpsz
; i
++) {
12398 bcm_bprintf(strbuf
, "%llu\t%s\t%d\n",
12399 bus
->ds_trace
[i
].timestamp
,
12400 bus
->ds_trace
[i
].d2h
? "D2H":"H2D",
12401 bus
->ds_trace
[i
].dsval
);
12403 #endif /* PCIE_INB_DW */
12404 bcm_bprintf(strbuf
, "--------------------------\n");
12408 dhd_bus_dump_trap_info(dhd_bus_t
*bus
, struct bcmstrbuf
*strbuf
)
12410 trap_t
*tr
= &bus
->dhd
->last_trap_info
;
12411 bcm_bprintf(strbuf
,
12412 "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
12413 " lp 0x%x, rpc 0x%x"
12414 "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
12415 "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
12416 "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
12417 ltoh32(tr
->type
), ltoh32(tr
->epc
), ltoh32(tr
->cpsr
), ltoh32(tr
->spsr
),
12418 ltoh32(tr
->r13
), ltoh32(tr
->r14
), ltoh32(tr
->pc
),
12419 ltoh32(bus
->pcie_sh
->trap_addr
),
12420 ltoh32(tr
->r0
), ltoh32(tr
->r1
), ltoh32(tr
->r2
), ltoh32(tr
->r3
),
12421 ltoh32(tr
->r4
), ltoh32(tr
->r5
), ltoh32(tr
->r6
), ltoh32(tr
->r7
),
12422 ltoh32(tr
->r8
), ltoh32(tr
->r9
), ltoh32(tr
->r10
),
12423 ltoh32(tr
->r11
), ltoh32(tr
->r12
));
12427 dhd_bus_readwrite_bp_addr(dhd_pub_t
*dhdp
, uint addr
, uint size
, uint
* data
, bool read
)
12430 struct dhd_bus
*bus
= dhdp
->bus
;
12432 if (serialized_backplane_access(bus
, addr
, size
, data
, read
) != BCME_OK
) {
12433 DHD_ERROR(("Invalid size/addr combination \n"));
12434 bcmerror
= BCME_ERROR
;
12441 dhd_get_idletime(dhd_pub_t
*dhd
)
12443 return dhd
->bus
->idletime
;
12447 dhd_get_rpm_state(dhd_pub_t
*dhd
)
12449 return dhd
->bus
->rpm_enabled
;
12453 dhd_set_rpm_state(dhd_pub_t
*dhd
, bool state
)
12455 DHD_ERROR(("%s: %d\n", __FUNCTION__
, state
));
12456 dhd
->bus
->rpm_enabled
= state
;
12460 dhd_sbreg_op(dhd_pub_t
*dhd
, uint addr
, uint
*val
, bool read
)
12463 if (serialized_backplane_access(dhd
->bus
, addr
, sizeof(uint
), val
, read
) != BCME_OK
) {
12464 DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr
));
12466 DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr
, *val
, read
));
12471 #ifdef DHD_SSSR_DUMP
12473 dhdpcie_get_sssr_fifo_dump(dhd_pub_t
*dhd
, uint
*buf
, uint fifo_size
,
12474 uint addr_reg
, uint data_reg
)
12480 DHD_ERROR(("%s\n", __FUNCTION__
));
12483 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__
));
12488 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__
));
12492 /* Set the base address offset to 0 */
12495 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
12498 /* Read 4 bytes at once and loop for fifo_size / 4 */
12499 for (i
= 0; i
< fifo_size
/ 4; i
++) {
12500 if (serialized_backplane_access(dhd
->bus
, addr
,
12501 sizeof(uint
), &val
, TRUE
) != BCME_OK
) {
12502 DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__
));
12512 dhdpcie_get_sssr_dig_dump(dhd_pub_t
*dhd
, uint
*buf
, uint fifo_size
,
12518 si_t
*sih
= dhd
->bus
->sih
;
12519 bool vasip_enab
, dig_mem_check
;
12520 uint32 ioctrl_addr
= 0;
12522 DHD_ERROR(("%s addr_reg=0x%x size=0x%x\n", __FUNCTION__
, addr_reg
, fifo_size
));
12525 DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__
));
12530 DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__
));
12534 vasip_enab
= FALSE
;
12535 dig_mem_check
= FALSE
;
12536 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12537 switch (dhd
->sssr_reg_info
->rev2
.version
) {
12538 case SSSR_REG_INFO_VER_3
:
12539 /* intentional fall through */
12540 case SSSR_REG_INFO_VER_2
:
12541 if ((dhd
->sssr_reg_info
->rev2
.length
> OFFSETOF(sssr_reg_info_v2_t
,
12542 dig_mem_info
)) && dhd
->sssr_reg_info
->rev2
.dig_mem_info
.dig_sr_size
) {
12543 dig_mem_check
= TRUE
;
12546 case SSSR_REG_INFO_VER_1
:
12547 if (dhd
->sssr_reg_info
->rev1
.vasip_regs
.vasip_sr_size
) {
12549 } else if ((dhd
->sssr_reg_info
->rev1
.length
> OFFSETOF(sssr_reg_info_v1_t
,
12550 dig_mem_info
)) && dhd
->sssr_reg_info
->rev1
.
12551 dig_mem_info
.dig_sr_size
) {
12552 dig_mem_check
= TRUE
;
12554 ioctrl_addr
= dhd
->sssr_reg_info
->rev1
.vasip_regs
.wrapper_regs
.ioctrl
;
12556 case SSSR_REG_INFO_VER_0
:
12557 if (dhd
->sssr_reg_info
->rev0
.vasip_regs
.vasip_sr_size
) {
12560 ioctrl_addr
= dhd
->sssr_reg_info
->rev0
.vasip_regs
.wrapper_regs
.ioctrl
;
12563 DHD_ERROR(("invalid sssr_reg_ver"));
12564 return BCME_UNSUPPORTED
;
12567 DHD_ERROR(("dig_mem_check=%d vasip_enab=%d\n", dig_mem_check
, vasip_enab
));
12568 if (!vasip_enab
&& dig_mem_check
) {
12569 int err
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
, addr_reg
, (uint8
*)buf
,
12571 if (err
!= BCME_OK
) {
12572 DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
12576 /* Check if vasip clk is disabled, if yes enable it */
12577 addr
= ioctrl_addr
;
12578 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
12581 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
12585 /* Read 4 bytes at once and loop for fifo_size / 4 */
12586 for (i
= 0; i
< fifo_size
/ 4; i
++, addr
+= 4) {
12587 if (serialized_backplane_access(dhd
->bus
, addr
, sizeof(uint
),
12588 &val
, TRUE
) != BCME_OK
) {
12589 DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__
,
12599 uint chipc_corerev
;
12600 chipcregs_t
*chipcregs
;
12602 /* Save the current core */
12603 cur_coreid
= si_coreid(sih
);
12605 /* Switch to ChipC */
12606 chipcregs
= (chipcregs_t
*)si_setcore(sih
, CC_CORE_ID
, 0);
12608 DHD_ERROR(("%s: si_setcore returns NULL for core id %u \n",
12609 __FUNCTION__
, CC_CORE_ID
));
12613 chipc_corerev
= si_corerev(sih
);
12615 if ((chipc_corerev
== 64) || (chipc_corerev
== 65)) {
12616 W_REG(si_osh(sih
), &chipcregs
->sr_memrw_addr
, 0);
12618 /* Read 4 bytes at once and loop for fifo_size / 4 */
12619 for (i
= 0; i
< fifo_size
/ 4; i
++) {
12620 buf
[i
] = R_REG(si_osh(sih
), &chipcregs
->sr_memrw_data
);
12625 /* Switch back to the original core */
12626 si_setcore(sih
, cur_coreid
, 0);
12632 #if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
12634 dhdpcie_get_etd_preserve_logs(dhd_pub_t
*dhd
,
12635 uint8
*ext_trap_data
, void *event_decode_data
)
12637 hnd_ext_trap_hdr_t
*hdr
= NULL
;
12639 eventlog_trapdata_info_t
*etd_evtlog
= NULL
;
12640 eventlog_trap_buf_info_t
*evtlog_buf_arr
= NULL
;
12646 if (!ext_trap_data
|| !event_decode_data
|| !dhd
)
12649 if (!dhd
->concise_dbg_buf
)
12652 /* First word is original trap_data, skip */
12653 ext_trap_data
+= sizeof(uint32
);
12655 hdr
= (hnd_ext_trap_hdr_t
*)ext_trap_data
;
12656 tlv
= bcm_parse_tlvs(hdr
->data
, hdr
->len
, TAG_TRAP_LOG_DATA
);
12658 uint32 baseaddr
= 0;
12659 uint32 endaddr
= dhd
->bus
->dongle_ram_base
+ dhd
->bus
->ramsize
- 4;
12661 etd_evtlog
= (eventlog_trapdata_info_t
*)tlv
->data
;
12662 DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
12663 "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__
,
12664 (etd_evtlog
->num_elements
),
12665 ntoh32(etd_evtlog
->seq_num
), (etd_evtlog
->log_arr_addr
)));
12666 if (!etd_evtlog
->num_elements
||
12667 etd_evtlog
->num_elements
> MAX_EVENTLOG_BUFFERS
) {
12668 DHD_ERROR(("%s: ETD has bad 'num_elements' !\n", __FUNCTION__
));
12671 if (!etd_evtlog
->log_arr_addr
) {
12672 DHD_ERROR(("%s: ETD has bad 'log_arr_addr' !\n", __FUNCTION__
));
12676 arr_size
= (uint32
)sizeof(*evtlog_buf_arr
) * (etd_evtlog
->num_elements
);
12677 evtlog_buf_arr
= MALLOCZ(dhd
->osh
, arr_size
);
12678 if (!evtlog_buf_arr
) {
12679 DHD_ERROR(("%s: out of memory !\n", __FUNCTION__
));
12683 /* boundary check */
12684 baseaddr
= etd_evtlog
->log_arr_addr
;
12685 if ((baseaddr
< dhd
->bus
->dongle_ram_base
) ||
12686 ((baseaddr
+ arr_size
) > endaddr
)) {
12687 DHD_ERROR(("%s: Error reading invalid address\n",
12692 /* read the eventlog_trap_buf_info_t array from dongle memory */
12693 err
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
,
12694 (ulong
)(etd_evtlog
->log_arr_addr
),
12695 (uint8
*)evtlog_buf_arr
, arr_size
);
12696 if (err
!= BCME_OK
) {
12697 DHD_ERROR(("%s: Error reading event log array from dongle !\n",
12701 /* ntoh is required only for seq_num, because in the original
12702 * case of event logs from info ring, it is sent from dongle in that way
12703 * so for ETD also dongle follows same convention
12705 seqnum
= ntoh32(etd_evtlog
->seq_num
);
12706 memset(dhd
->concise_dbg_buf
, 0, CONCISE_DUMP_BUFLEN
);
12707 for (i
= 0; i
< (etd_evtlog
->num_elements
); ++i
) {
12708 /* boundary check */
12709 baseaddr
= evtlog_buf_arr
[i
].buf_addr
;
12710 if ((baseaddr
< dhd
->bus
->dongle_ram_base
) ||
12711 ((baseaddr
+ evtlog_buf_arr
[i
].len
) > endaddr
)) {
12712 DHD_ERROR(("%s: Error reading invalid address\n",
12716 /* read each individual event log buf from dongle memory */
12717 err
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
,
12718 ((ulong
)evtlog_buf_arr
[i
].buf_addr
),
12719 dhd
->concise_dbg_buf
, (evtlog_buf_arr
[i
].len
));
12720 if (err
!= BCME_OK
) {
12721 DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
12725 dhd_dbg_msgtrace_log_parser(dhd
, dhd
->concise_dbg_buf
,
12726 event_decode_data
, (evtlog_buf_arr
[i
].len
),
12727 FALSE
, hton32(seqnum
));
12731 MFREE(dhd
->osh
, evtlog_buf_arr
, arr_size
);
12733 DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__
));
12736 #endif /* BCMPCIE && DHD_LOG_DUMP */
12739 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t
*dhd
, uint32 reg_val
)
12743 uint powerctrl_mask
;
12745 DHD_ERROR(("%s\n", __FUNCTION__
));
12747 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12748 switch (dhd
->sssr_reg_info
->rev2
.version
) {
12749 case SSSR_REG_INFO_VER_3
:
12750 /* intentional fall through */
12751 case SSSR_REG_INFO_VER_2
:
12752 addr
= dhd
->sssr_reg_info
->rev2
.chipcommon_regs
.base_regs
.powerctrl
;
12753 powerctrl_mask
= dhd
->sssr_reg_info
->rev2
.
12754 chipcommon_regs
.base_regs
.powerctrl_mask
;
12756 case SSSR_REG_INFO_VER_1
:
12757 case SSSR_REG_INFO_VER_0
:
12758 addr
= dhd
->sssr_reg_info
->rev1
.chipcommon_regs
.base_regs
.powerctrl
;
12759 powerctrl_mask
= dhd
->sssr_reg_info
->rev1
.
12760 chipcommon_regs
.base_regs
.powerctrl_mask
;
12763 DHD_ERROR(("invalid sssr_reg_ver"));
12764 return BCME_UNSUPPORTED
;
12767 /* conditionally clear bits [11:8] of PowerCtrl */
12768 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
12770 if (!(val
& powerctrl_mask
)) {
12771 dhd_sbreg_op(dhd
, addr
, ®_val
, FALSE
);
12777 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t
*dhd
)
12780 uint val
= 0, reg_val
= 0;
12781 uint powerctrl_mask
;
12783 DHD_ERROR(("%s\n", __FUNCTION__
));
12785 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12786 switch (dhd
->sssr_reg_info
->rev2
.version
) {
12787 case SSSR_REG_INFO_VER_3
:
12788 /* intentional fall through */
12789 case SSSR_REG_INFO_VER_2
:
12790 addr
= dhd
->sssr_reg_info
->rev2
.chipcommon_regs
.base_regs
.powerctrl
;
12791 powerctrl_mask
= dhd
->sssr_reg_info
->rev2
.
12792 chipcommon_regs
.base_regs
.powerctrl_mask
;
12794 case SSSR_REG_INFO_VER_1
:
12795 case SSSR_REG_INFO_VER_0
:
12796 addr
= dhd
->sssr_reg_info
->rev1
.chipcommon_regs
.base_regs
.powerctrl
;
12797 powerctrl_mask
= dhd
->sssr_reg_info
->rev1
.
12798 chipcommon_regs
.base_regs
.powerctrl_mask
;
12801 DHD_ERROR(("invalid sssr_reg_ver"));
12802 return BCME_UNSUPPORTED
;
12805 /* conditionally clear bits [11:8] of PowerCtrl */
12806 dhd_sbreg_op(dhd
, addr
, ®_val
, TRUE
);
12807 if (reg_val
& powerctrl_mask
) {
12809 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
12815 dhdpcie_clear_intmask_and_timer(dhd_pub_t
*dhd
)
12819 uint32 cc_intmask
, pmuintmask0
, pmuintmask1
, resreqtimer
, macresreqtimer
,
12820 macresreqtimer1
, vasip_sr_size
= 0;
12822 DHD_ERROR(("%s\n", __FUNCTION__
));
12824 /* SSSR register information structure v0 and v1 shares most except dig_mem */
12825 switch (dhd
->sssr_reg_info
->rev2
.version
) {
12826 case SSSR_REG_INFO_VER_3
:
12827 /* intentional fall through */
12828 case SSSR_REG_INFO_VER_2
:
12829 cc_intmask
= dhd
->sssr_reg_info
->rev2
.chipcommon_regs
.base_regs
.intmask
;
12830 pmuintmask0
= dhd
->sssr_reg_info
->rev2
.pmu_regs
.base_regs
.pmuintmask0
;
12831 pmuintmask1
= dhd
->sssr_reg_info
->rev2
.pmu_regs
.base_regs
.pmuintmask1
;
12832 resreqtimer
= dhd
->sssr_reg_info
->rev2
.pmu_regs
.base_regs
.resreqtimer
;
12833 macresreqtimer
= dhd
->sssr_reg_info
->rev2
.pmu_regs
.base_regs
.macresreqtimer
;
12834 macresreqtimer1
= dhd
->sssr_reg_info
->rev2
.
12835 pmu_regs
.base_regs
.macresreqtimer1
;
12837 case SSSR_REG_INFO_VER_1
:
12838 case SSSR_REG_INFO_VER_0
:
12839 cc_intmask
= dhd
->sssr_reg_info
->rev1
.chipcommon_regs
.base_regs
.intmask
;
12840 pmuintmask0
= dhd
->sssr_reg_info
->rev1
.pmu_regs
.base_regs
.pmuintmask0
;
12841 pmuintmask1
= dhd
->sssr_reg_info
->rev1
.pmu_regs
.base_regs
.pmuintmask1
;
12842 resreqtimer
= dhd
->sssr_reg_info
->rev1
.pmu_regs
.base_regs
.resreqtimer
;
12843 macresreqtimer
= dhd
->sssr_reg_info
->rev1
.pmu_regs
.base_regs
.macresreqtimer
;
12844 macresreqtimer1
= dhd
->sssr_reg_info
->rev1
.
12845 pmu_regs
.base_regs
.macresreqtimer1
;
12846 vasip_sr_size
= dhd
->sssr_reg_info
->rev1
.vasip_regs
.vasip_sr_size
;
12849 DHD_ERROR(("invalid sssr_reg_ver"));
12850 return BCME_UNSUPPORTED
;
12853 /* clear chipcommon intmask */
12855 dhd_sbreg_op(dhd
, cc_intmask
, &val
, FALSE
);
12857 /* clear PMUIntMask0 */
12859 dhd_sbreg_op(dhd
, pmuintmask0
, &val
, FALSE
);
12861 /* clear PMUIntMask1 */
12863 dhd_sbreg_op(dhd
, pmuintmask1
, &val
, FALSE
);
12865 /* clear res_req_timer */
12867 dhd_sbreg_op(dhd
, resreqtimer
, &val
, FALSE
);
12869 /* clear macresreqtimer */
12871 dhd_sbreg_op(dhd
, macresreqtimer
, &val
, FALSE
);
12873 /* clear macresreqtimer1 */
12875 dhd_sbreg_op(dhd
, macresreqtimer1
, &val
, FALSE
);
12877 /* clear VasipClkEn */
12878 if (vasip_sr_size
) {
12879 addr
= dhd
->sssr_reg_info
->rev1
.vasip_regs
.wrapper_regs
.ioctrl
;
12881 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
12888 dhdpcie_update_d11_status_from_trapdata(dhd_pub_t
*dhd
)
12890 #define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1)
12891 #define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4)
12892 uint trap_data_mask
[MAX_NUM_D11CORES
] =
12893 {TRAP_DATA_MAIN_CORE_BIT_MASK
, TRAP_DATA_AUX_CORE_BIT_MASK
};
12895 /* Apply only for 4375 chip */
12896 if (dhd_bus_chip_id(dhd
) == BCM4375_CHIP_ID
) {
12897 for (i
= 0; i
< MAX_NUM_D11CORES
; i
++) {
12898 if (dhd
->sssr_d11_outofreset
[i
] &&
12899 (dhd
->dongle_trap_data
& trap_data_mask
[i
])) {
12900 dhd
->sssr_d11_outofreset
[i
] = TRUE
;
12902 dhd
->sssr_d11_outofreset
[i
] = FALSE
;
12904 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
12905 "trap_data:0x%x-0x%x\n",
12906 __FUNCTION__
, i
, dhd
->sssr_d11_outofreset
[i
],
12907 dhd
->dongle_trap_data
, trap_data_mask
[i
]));
12913 dhdpcie_d11_check_outofreset(dhd_pub_t
*dhd
)
12918 uint8 num_d11cores
;
12920 DHD_ERROR(("%s\n", __FUNCTION__
));
12922 num_d11cores
= dhd_d11_slices_num_get(dhd
);
12924 for (i
= 0; i
< num_d11cores
; i
++) {
12925 /* Check if bit 0 of resetctrl is cleared */
12926 /* SSSR register information structure v0 and
12927 * v1 shares most except dig_mem
12929 switch (dhd
->sssr_reg_info
->rev2
.version
) {
12930 case SSSR_REG_INFO_VER_3
:
12931 /* intentional fall through */
12932 case SSSR_REG_INFO_VER_2
:
12933 addr
= dhd
->sssr_reg_info
->rev2
.
12934 mac_regs
[i
].wrapper_regs
.resetctrl
;
12936 case SSSR_REG_INFO_VER_1
:
12937 case SSSR_REG_INFO_VER_0
:
12938 addr
= dhd
->sssr_reg_info
->rev1
.
12939 mac_regs
[i
].wrapper_regs
.resetctrl
;
12942 DHD_ERROR(("invalid sssr_reg_ver"));
12943 return BCME_UNSUPPORTED
;
12946 DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
12950 dhd_sbreg_op(dhd
, addr
, &val
, TRUE
);
12952 dhd
->sssr_d11_outofreset
[i
] = TRUE
;
12954 dhd
->sssr_d11_outofreset
[i
] = FALSE
;
12956 DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
12957 __FUNCTION__
, i
, dhd
->sssr_d11_outofreset
[i
]));
12959 /* XXX Temporary WAR for 4375 to handle AXI errors on bad core
12960 * to not collect SSSR dump for the core whose bit is not set in trap_data.
12961 * It will be reverted once AXI errors are fixed
12963 dhdpcie_update_d11_status_from_trapdata(dhd
);
12969 dhdpcie_d11_clear_clk_req(dhd_pub_t
*dhd
)
12973 uint8 num_d11cores
;
12974 uint32 clockrequeststatus
, clockcontrolstatus
, clockcontrolstatus_val
;
12976 DHD_ERROR(("%s\n", __FUNCTION__
));
12978 num_d11cores
= dhd_d11_slices_num_get(dhd
);
12980 for (i
= 0; i
< num_d11cores
; i
++) {
12981 if (dhd
->sssr_d11_outofreset
[i
]) {
12982 /* clear request clk only if itopoobb/extrsrcreqs is non zero */
12983 /* SSSR register information structure v0 and
12984 * v1 shares most except dig_mem
12986 switch (dhd
->sssr_reg_info
->rev2
.version
) {
12987 case SSSR_REG_INFO_VER_3
:
12988 /* intentional fall through */
12989 case SSSR_REG_INFO_VER_2
:
12990 clockrequeststatus
= dhd
->sssr_reg_info
->rev2
.
12991 mac_regs
[i
].wrapper_regs
.extrsrcreq
;
12992 clockcontrolstatus
= dhd
->sssr_reg_info
->rev2
.
12993 mac_regs
[i
].base_regs
.clockcontrolstatus
;
12994 clockcontrolstatus_val
= dhd
->sssr_reg_info
->rev2
.
12995 mac_regs
[i
].base_regs
.clockcontrolstatus_val
;
12997 case SSSR_REG_INFO_VER_1
:
12998 case SSSR_REG_INFO_VER_0
:
12999 clockrequeststatus
= dhd
->sssr_reg_info
->rev1
.
13000 mac_regs
[i
].wrapper_regs
.itopoobb
;
13001 clockcontrolstatus
= dhd
->sssr_reg_info
->rev1
.
13002 mac_regs
[i
].base_regs
.clockcontrolstatus
;
13003 clockcontrolstatus_val
= dhd
->sssr_reg_info
->rev1
.
13004 mac_regs
[i
].base_regs
.clockcontrolstatus_val
;
13007 DHD_ERROR(("invalid sssr_reg_ver"));
13008 return BCME_UNSUPPORTED
;
13010 dhd_sbreg_op(dhd
, clockrequeststatus
, &val
, TRUE
);
13012 /* clear clockcontrolstatus */
13013 dhd_sbreg_op(dhd
, clockcontrolstatus
,
13014 &clockcontrolstatus_val
, FALSE
);
13022 dhdpcie_arm_clear_clk_req(dhd_pub_t
*dhd
)
13026 uint32 resetctrl
, clockrequeststatus
, clockcontrolstatus
, clockcontrolstatus_val
;
13028 DHD_ERROR(("%s\n", __FUNCTION__
));
13030 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13031 switch (dhd
->sssr_reg_info
->rev2
.version
) {
13032 case SSSR_REG_INFO_VER_3
:
13033 /* intentional fall through */
13034 case SSSR_REG_INFO_VER_2
:
13035 resetctrl
= dhd
->sssr_reg_info
->rev2
.
13036 arm_regs
.wrapper_regs
.resetctrl
;
13037 clockrequeststatus
= dhd
->sssr_reg_info
->rev2
.
13038 arm_regs
.wrapper_regs
.extrsrcreq
;
13039 clockcontrolstatus
= dhd
->sssr_reg_info
->rev2
.
13040 arm_regs
.base_regs
.clockcontrolstatus
;
13041 clockcontrolstatus_val
= dhd
->sssr_reg_info
->rev2
.
13042 arm_regs
.base_regs
.clockcontrolstatus_val
;
13044 case SSSR_REG_INFO_VER_1
:
13045 case SSSR_REG_INFO_VER_0
:
13046 resetctrl
= dhd
->sssr_reg_info
->rev1
.
13047 arm_regs
.wrapper_regs
.resetctrl
;
13048 clockrequeststatus
= dhd
->sssr_reg_info
->rev1
.
13049 arm_regs
.wrapper_regs
.itopoobb
;
13050 clockcontrolstatus
= dhd
->sssr_reg_info
->rev1
.
13051 arm_regs
.base_regs
.clockcontrolstatus
;
13052 clockcontrolstatus_val
= dhd
->sssr_reg_info
->rev1
.
13053 arm_regs
.base_regs
.clockcontrolstatus_val
;
13056 DHD_ERROR(("invalid sssr_reg_ver"));
13057 return BCME_UNSUPPORTED
;
13060 /* Check if bit 0 of resetctrl is cleared */
13061 dhd_sbreg_op(dhd
, resetctrl
, &val
, TRUE
);
13063 /* clear request clk only if itopoobb/extrsrcreqs is non zero */
13064 dhd_sbreg_op(dhd
, clockrequeststatus
, &val
, TRUE
);
13066 /* clear clockcontrolstatus */
13067 dhd_sbreg_op(dhd
, clockcontrolstatus
, &clockcontrolstatus_val
, FALSE
);
13070 if (MULTIBP_ENAB(dhd
->bus
->sih
)) {
13071 /* Clear coherent bits for CA7 because CPU is halted */
13072 if (dhd
->bus
->coreid
== ARMCA7_CORE_ID
) {
13073 cfgval
= dhdpcie_bus_cfg_read_dword(dhd
->bus
,
13074 PCIE_CFG_SUBSYSTEM_CONTROL
, 4);
13075 dhdpcie_bus_cfg_write_dword(dhd
->bus
, PCIE_CFG_SUBSYSTEM_CONTROL
, 4,
13076 (cfgval
& ~PCIE_BARCOHERENTACCEN_MASK
));
13079 /* Just halt ARM but do not reset the core */
13080 resetctrl
&= ~(SI_CORE_SIZE
- 1);
13081 resetctrl
+= OFFSETOF(aidmp_t
, ioctrl
);
13083 dhd_sbreg_op(dhd
, resetctrl
, &val
, TRUE
);
13084 val
|= SICF_CPUHALT
;
13085 dhd_sbreg_op(dhd
, resetctrl
, &val
, FALSE
);
13093 dhdpcie_arm_resume_clk_req(dhd_pub_t
*dhd
)
13098 DHD_ERROR(("%s\n", __FUNCTION__
));
13100 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13101 switch (dhd
->sssr_reg_info
->rev2
.version
) {
13102 case SSSR_REG_INFO_VER_3
:
13103 /* intentional fall through */
13104 case SSSR_REG_INFO_VER_2
:
13105 resetctrl
= dhd
->sssr_reg_info
->rev2
.
13106 arm_regs
.wrapper_regs
.resetctrl
;
13108 case SSSR_REG_INFO_VER_1
:
13109 case SSSR_REG_INFO_VER_0
:
13110 resetctrl
= dhd
->sssr_reg_info
->rev1
.
13111 arm_regs
.wrapper_regs
.resetctrl
;
13114 DHD_ERROR(("invalid sssr_reg_ver"));
13115 return BCME_UNSUPPORTED
;
13118 /* Check if bit 0 of resetctrl is cleared */
13119 dhd_sbreg_op(dhd
, resetctrl
, &val
, TRUE
);
13121 if (MULTIBP_ENAB(dhd
->bus
->sih
) && (dhd
->bus
->coreid
!= ARMCA7_CORE_ID
)) {
13122 /* Take ARM out of halt but do not reset core */
13123 resetctrl
&= ~(SI_CORE_SIZE
- 1);
13124 resetctrl
+= OFFSETOF(aidmp_t
, ioctrl
);
13126 dhd_sbreg_op(dhd
, resetctrl
, &val
, TRUE
);
13127 val
&= ~SICF_CPUHALT
;
13128 dhd_sbreg_op(dhd
, resetctrl
, &val
, FALSE
);
13129 dhd_sbreg_op(dhd
, resetctrl
, &val
, TRUE
);
13137 dhdpcie_pcie_clear_clk_req(dhd_pub_t
*dhd
)
13140 uint32 clockrequeststatus
, clockcontrolstatus_addr
, clockcontrolstatus_val
;
13142 DHD_ERROR(("%s\n", __FUNCTION__
));
13144 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13145 switch (dhd
->sssr_reg_info
->rev2
.version
) {
13146 case SSSR_REG_INFO_VER_3
:
13147 /* intentional fall through */
13148 case SSSR_REG_INFO_VER_2
:
13149 clockrequeststatus
= dhd
->sssr_reg_info
->rev2
.
13150 pcie_regs
.wrapper_regs
.extrsrcreq
;
13151 clockcontrolstatus_addr
= dhd
->sssr_reg_info
->rev2
.
13152 pcie_regs
.base_regs
.clockcontrolstatus
;
13153 clockcontrolstatus_val
= dhd
->sssr_reg_info
->rev2
.
13154 pcie_regs
.base_regs
.clockcontrolstatus_val
;
13156 case SSSR_REG_INFO_VER_1
:
13157 case SSSR_REG_INFO_VER_0
:
13158 clockrequeststatus
= dhd
->sssr_reg_info
->rev1
.
13159 pcie_regs
.wrapper_regs
.itopoobb
;
13160 clockcontrolstatus_addr
= dhd
->sssr_reg_info
->rev1
.
13161 pcie_regs
.base_regs
.clockcontrolstatus
;
13162 clockcontrolstatus_val
= dhd
->sssr_reg_info
->rev1
.
13163 pcie_regs
.base_regs
.clockcontrolstatus_val
;
13166 DHD_ERROR(("invalid sssr_reg_ver"));
13167 return BCME_UNSUPPORTED
;
13170 /* clear request clk only if itopoobb/extrsrcreqs is non zero */
13171 dhd_sbreg_op(dhd
, clockrequeststatus
, &val
, TRUE
);
13173 /* clear clockcontrolstatus */
13174 dhd_sbreg_op(dhd
, clockcontrolstatus_addr
, &clockcontrolstatus_val
, FALSE
);
13180 dhdpcie_pcie_send_ltrsleep(dhd_pub_t
*dhd
)
13185 DHD_ERROR(("%s\n", __FUNCTION__
));
13187 /* SSSR register information structure v0 and v1 shares most except dig_mem */
13188 switch (dhd
->sssr_reg_info
->rev2
.version
) {
13189 case SSSR_REG_INFO_VER_3
:
13190 /* intentional fall through */
13191 case SSSR_REG_INFO_VER_2
:
13192 addr
= dhd
->sssr_reg_info
->rev2
.pcie_regs
.base_regs
.ltrstate
;
13194 case SSSR_REG_INFO_VER_1
:
13195 case SSSR_REG_INFO_VER_0
:
13196 addr
= dhd
->sssr_reg_info
->rev1
.pcie_regs
.base_regs
.ltrstate
;
13199 DHD_ERROR(("invalid sssr_reg_ver"));
13200 return BCME_UNSUPPORTED
;
13204 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
13207 dhd_sbreg_op(dhd
, addr
, &val
, FALSE
);
13213 dhdpcie_clear_clk_req(dhd_pub_t
*dhd
)
13215 DHD_ERROR(("%s\n", __FUNCTION__
));
13217 dhdpcie_arm_clear_clk_req(dhd
);
13219 dhdpcie_d11_clear_clk_req(dhd
);
13221 dhdpcie_pcie_clear_clk_req(dhd
);
13227 dhdpcie_bring_d11_outofreset(dhd_pub_t
*dhd
)
13231 uint8 num_d11cores
;
13232 uint32 resetctrl_addr
, ioctrl_addr
, ioctrl_resetseq_val0
, ioctrl_resetseq_val1
,
13233 ioctrl_resetseq_val2
, ioctrl_resetseq_val3
, ioctrl_resetseq_val4
;
13235 DHD_ERROR(("%s\n", __FUNCTION__
));
13237 num_d11cores
= dhd_d11_slices_num_get(dhd
);
13239 for (i
= 0; i
< num_d11cores
; i
++) {
13240 if (dhd
->sssr_d11_outofreset
[i
]) {
13241 /* SSSR register information structure v0 and v1 shares
13242 * most except dig_mem
13244 switch (dhd
->sssr_reg_info
->rev2
.version
) {
13245 case SSSR_REG_INFO_VER_3
:
13246 /* intentional fall through */
13247 case SSSR_REG_INFO_VER_2
:
13248 resetctrl_addr
= dhd
->sssr_reg_info
->rev2
.mac_regs
[i
].
13249 wrapper_regs
.resetctrl
;
13250 ioctrl_addr
= dhd
->sssr_reg_info
->rev2
.mac_regs
[i
].
13251 wrapper_regs
.ioctrl
;
13252 ioctrl_resetseq_val0
= dhd
->sssr_reg_info
->rev2
.
13253 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[0];
13254 ioctrl_resetseq_val1
= dhd
->sssr_reg_info
->rev2
.
13255 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[1];
13256 ioctrl_resetseq_val2
= dhd
->sssr_reg_info
->rev2
.
13257 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[2];
13258 ioctrl_resetseq_val3
= dhd
->sssr_reg_info
->rev2
.
13259 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[3];
13260 ioctrl_resetseq_val4
= dhd
->sssr_reg_info
->rev2
.
13261 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[4];
13263 case SSSR_REG_INFO_VER_1
:
13264 case SSSR_REG_INFO_VER_0
:
13265 resetctrl_addr
= dhd
->sssr_reg_info
->rev1
.mac_regs
[i
].
13266 wrapper_regs
.resetctrl
;
13267 ioctrl_addr
= dhd
->sssr_reg_info
->rev1
.mac_regs
[i
].
13268 wrapper_regs
.ioctrl
;
13269 ioctrl_resetseq_val0
= dhd
->sssr_reg_info
->rev1
.
13270 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[0];
13271 ioctrl_resetseq_val1
= dhd
->sssr_reg_info
->rev1
.
13272 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[1];
13273 ioctrl_resetseq_val2
= dhd
->sssr_reg_info
->rev1
.
13274 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[2];
13275 ioctrl_resetseq_val3
= dhd
->sssr_reg_info
->rev1
.
13276 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[3];
13277 ioctrl_resetseq_val4
= dhd
->sssr_reg_info
->rev1
.
13278 mac_regs
[0].wrapper_regs
.ioctrl_resetseq_val
[4];
13281 DHD_ERROR(("invalid sssr_reg_ver"));
13282 return BCME_UNSUPPORTED
;
13284 /* disable core by setting bit 0 */
13286 dhd_sbreg_op(dhd
, resetctrl_addr
, &val
, FALSE
);
13289 dhd_sbreg_op(dhd
, ioctrl_addr
, &ioctrl_resetseq_val0
, FALSE
);
13291 dhd_sbreg_op(dhd
, ioctrl_addr
, &ioctrl_resetseq_val1
, FALSE
);
13293 /* enable core by clearing bit 0 */
13295 dhd_sbreg_op(dhd
, resetctrl_addr
, &val
, FALSE
);
13297 dhd_sbreg_op(dhd
, ioctrl_addr
, &ioctrl_resetseq_val2
, FALSE
);
13299 dhd_sbreg_op(dhd
, ioctrl_addr
, &ioctrl_resetseq_val3
, FALSE
);
13301 dhd_sbreg_op(dhd
, ioctrl_addr
, &ioctrl_resetseq_val4
, FALSE
);
#ifdef DHD_SSSR_DUMP_BEFORE_SR
/*
 * Capture the pre-SR SSSR state: per-D11-slice FIFO dumps (for slices
 * marked out of reset) into sssr_d11_before[], plus the digital/dig
 * memory dump into sssr_dig_buf_before when a dig buffer is configured.
 * Returns BCME_OK (capture helpers' results are not checked here,
 * matching the original code).
 */
static int
dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
{
	int i;
	uint32 sr_size, xmtaddress, xmtdata, dig_buf_size, dig_buf_addr;
	uint8 num_d11cores;

	DHD_ERROR(("%s\n", __FUNCTION__));

	num_d11cores = dhd_d11_slices_num_get(dhd);

	for (i = 0; i < num_d11cores; i++) {
		if (dhd->sssr_d11_outofreset[i]) {
			sr_size = dhd_sssr_mac_buf_size(dhd, i);
			xmtaddress = dhd_sssr_mac_xmtaddress(dhd, i);
			xmtdata = dhd_sssr_mac_xmtdata(dhd, i);
			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
				sr_size, xmtaddress, xmtdata);
		}
	}

	dig_buf_size = dhd_sssr_dig_buf_size(dhd);
	dig_buf_addr = dhd_sssr_dig_buf_addr(dhd);
	if (dig_buf_size) {
		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
			dig_buf_size, dig_buf_addr);
	}

	return BCME_OK;
}
#endif /* DHD_SSSR_DUMP_BEFORE_SR */
13341 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t
*dhd
)
13344 uint32 sr_size
, xmtaddress
, xmtdata
, dig_buf_size
, dig_buf_addr
;
13345 uint8 num_d11cores
;
13347 DHD_ERROR(("%s\n", __FUNCTION__
));
13349 num_d11cores
= dhd_d11_slices_num_get(dhd
);
13351 for (i
= 0; i
< num_d11cores
; i
++) {
13352 if (dhd
->sssr_d11_outofreset
[i
]) {
13353 sr_size
= dhd_sssr_mac_buf_size(dhd
, i
);
13354 xmtaddress
= dhd_sssr_mac_xmtaddress(dhd
, i
);
13355 xmtdata
= dhd_sssr_mac_xmtdata(dhd
, i
);
13356 dhdpcie_get_sssr_fifo_dump(dhd
, dhd
->sssr_d11_after
[i
],
13357 sr_size
, xmtaddress
, xmtdata
);
13361 dig_buf_size
= dhd_sssr_dig_buf_size(dhd
);
13362 dig_buf_addr
= dhd_sssr_dig_buf_addr(dhd
);
13364 if (dig_buf_size
) {
13365 dhdpcie_get_sssr_dig_dump(dhd
, dhd
->sssr_dig_buf_after
, dig_buf_size
, dig_buf_addr
);
13372 dhdpcie_sssr_dump(dhd_pub_t
*dhd
)
13374 uint32 powerctrl_val
;
13376 if (!dhd
->sssr_inited
) {
13377 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__
));
13381 if (dhd
->bus
->is_linkdown
) {
13382 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
13386 DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
13387 "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__
,
13388 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
13389 OFFSETOF(chipcregs_t
, powerctl
), 0, 0),
13390 si_corereg(dhd
->bus
->sih
, 0, OFFSETOF(chipcregs_t
, powerctl
), 0, 0),
13391 PMU_REG(dhd
->bus
->sih
, retention_ctl
, 0, 0),
13392 PMU_REG(dhd
->bus
->sih
, res_state
, 0, 0)));
13394 dhdpcie_d11_check_outofreset(dhd
);
13396 #ifdef DHD_SSSR_DUMP_BEFORE_SR
13397 DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__
));
13398 if (dhdpcie_sssr_dump_get_before_sr(dhd
) != BCME_OK
) {
13399 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__
));
13402 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
13404 dhdpcie_clear_intmask_and_timer(dhd
);
13405 dhdpcie_clear_clk_req(dhd
);
13406 powerctrl_val
= dhdpcie_suspend_chipcommon_powerctrl(dhd
);
13407 dhdpcie_pcie_send_ltrsleep(dhd
);
13409 if (MULTIBP_ENAB(dhd
->bus
->sih
)) {
13410 dhd_bus_pcie_pwr_req_wl_domain(dhd
->bus
, OFFSETOF(chipcregs_t
, powerctl
), FALSE
);
13413 /* Wait for some time before Restore */
13416 DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
13417 "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__
,
13418 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
13419 OFFSETOF(chipcregs_t
, powerctl
), 0, 0),
13420 si_corereg(dhd
->bus
->sih
, 0, OFFSETOF(chipcregs_t
, powerctl
), 0, 0),
13421 PMU_REG(dhd
->bus
->sih
, retention_ctl
, 0, 0),
13422 PMU_REG(dhd
->bus
->sih
, res_state
, 0, 0)));
13424 if (MULTIBP_ENAB(dhd
->bus
->sih
)) {
13425 dhd_bus_pcie_pwr_req_wl_domain(dhd
->bus
, OFFSETOF(chipcregs_t
, powerctl
), TRUE
);
13426 /* Add delay for WL domain to power up */
13429 DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
13430 "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__
,
13431 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
13432 OFFSETOF(chipcregs_t
, powerctl
), 0, 0),
13433 si_corereg(dhd
->bus
->sih
, 0, OFFSETOF(chipcregs_t
, powerctl
), 0, 0),
13434 PMU_REG(dhd
->bus
->sih
, retention_ctl
, 0, 0),
13435 PMU_REG(dhd
->bus
->sih
, res_state
, 0, 0)));
13438 dhdpcie_resume_chipcommon_powerctrl(dhd
, powerctrl_val
);
13439 dhdpcie_arm_resume_clk_req(dhd
);
13440 dhdpcie_bring_d11_outofreset(dhd
);
13442 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__
));
13443 if (dhdpcie_sssr_dump_get_after_sr(dhd
) != BCME_OK
) {
13444 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__
));
13447 dhd
->sssr_dump_collected
= TRUE
;
13448 dhd_write_sssr_dump(dhd
, SSSR_DUMP_MODE_SSSR
);
13453 #define PCIE_CFG_DSTATE_MASK 0x11u
13456 dhdpcie_fis_trigger(dhd_pub_t
*dhd
)
13458 uint32 fis_ctrl_status
;
13459 uint32 cfg_status_cmd
;
13462 if (!dhd
->sssr_inited
) {
13463 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__
));
13467 if (dhd
->bus
->is_linkdown
) {
13468 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
13472 #ifdef DHD_PCIE_RUNTIMEPM
13473 /* Bring back to D0 */
13474 dhdpcie_runtime_bus_wake(dhd
, CAN_SLEEP(), __builtin_return_address(0));
13475 /* Stop RPM timer so that even INB DW DEASSERT should not happen */
13476 DHD_STOP_RPM_TIMER(dhd
);
13477 #endif /* DHD_PCIE_RUNTIMEPM */
13479 /* Set fis_triggered flag to ignore link down callback from RC */
13480 dhd
->fis_triggered
= TRUE
;
13482 /* Set FIS PwrswForceOnAll */
13483 PMU_REG(dhd
->bus
->sih
, fis_ctrl_status
, PMU_FIS_FORCEON_ALL_MASK
, PMU_FIS_FORCEON_ALL_MASK
);
13485 fis_ctrl_status
= PMU_REG(dhd
->bus
->sih
, fis_ctrl_status
, 0, 0);
13487 DHD_ERROR(("%s: fis_ctrl_status=0x%x\n", __FUNCTION__
, fis_ctrl_status
));
13489 cfg_status_cmd
= dhd_pcie_config_read(dhd
->bus
, PCIECFGREG_STATUS_CMD
, sizeof(uint32
));
13490 cfg_pmcsr
= dhd_pcie_config_read(dhd
->bus
, PCIE_CFG_PMCSR
, sizeof(uint32
));
13491 DHD_ERROR(("before save: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13492 PCIECFGREG_STATUS_CMD
, cfg_status_cmd
, PCIE_CFG_PMCSR
, cfg_pmcsr
));
13494 DHD_PCIE_CONFIG_SAVE(dhd
->bus
);
13497 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
13498 DAR_FIS_CTRL(dhd
->bus
->sih
->buscorerev
), ~0, DAR_FIS_START_MASK
);
13499 OSL_DELAY(100 * 1000);
13502 * For android built-in platforms need to perform REG ON/OFF
13503 * to restore pcie link.
13504 * dhd_download_fw_on_driverload will be FALSE for built-in.
13506 if (!dhd_download_fw_on_driverload
) {
13507 DHD_ERROR(("%s: Toggle REG_ON and restore config space\n", __FUNCTION__
));
13508 dhdpcie_bus_stop_host_dev(dhd
->bus
);
13509 dhd_wifi_platform_set_power(dhd
, FALSE
);
13510 dhd_wifi_platform_set_power(dhd
, TRUE
);
13511 dhdpcie_bus_start_host_dev(dhd
->bus
);
13512 /* Restore inited pcie cfg from pci_load_saved_state */
13513 dhdpcie_bus_enable_device(dhd
->bus
);
13516 cfg_status_cmd
= dhd_pcie_config_read(dhd
->bus
, PCIECFGREG_STATUS_CMD
, sizeof(uint32
));
13517 cfg_pmcsr
= dhd_pcie_config_read(dhd
->bus
, PCIE_CFG_PMCSR
, sizeof(uint32
));
13518 DHD_ERROR(("after regon-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13519 PCIECFGREG_STATUS_CMD
, cfg_status_cmd
, PCIE_CFG_PMCSR
, cfg_pmcsr
));
13521 /* To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore */
13522 DHD_PCIE_CONFIG_RESTORE(dhd
->bus
);
13524 cfg_status_cmd
= dhd_pcie_config_read(dhd
->bus
, PCIECFGREG_STATUS_CMD
, sizeof(uint32
));
13525 cfg_pmcsr
= dhd_pcie_config_read(dhd
->bus
, PCIE_CFG_PMCSR
, sizeof(uint32
));
13526 DHD_ERROR(("after normal-restore: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13527 PCIECFGREG_STATUS_CMD
, cfg_status_cmd
, PCIE_CFG_PMCSR
, cfg_pmcsr
));
13530 * To-Do: below is debug code, remove this if EP is in D0 after REG-ON restore
13531 * in both MSM and LSI RCs
13533 if ((cfg_pmcsr
& PCIE_CFG_DSTATE_MASK
) != 0) {
13534 int ret
= dhdpcie_set_master_and_d0_pwrstate(dhd
->bus
);
13535 if (ret
!= BCME_OK
) {
13536 DHD_ERROR(("%s: Setting D0 failed, ABORT FIS collection\n", __FUNCTION__
));
13540 dhd_pcie_config_read(dhd
->bus
, PCIECFGREG_STATUS_CMD
, sizeof(uint32
));
13541 cfg_pmcsr
= dhd_pcie_config_read(dhd
->bus
, PCIE_CFG_PMCSR
, sizeof(uint32
));
13542 DHD_ERROR(("after force-d0: Status Command(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
13543 PCIECFGREG_STATUS_CMD
, cfg_status_cmd
, PCIE_CFG_PMCSR
, cfg_pmcsr
));
13546 /* Clear fis_triggered as REG OFF/ON recovered link */
13547 dhd
->fis_triggered
= FALSE
;
13553 dhd_bus_fis_trigger(dhd_pub_t
*dhd
)
13555 return dhdpcie_fis_trigger(dhd
);
13559 dhdpcie_reset_hwa(dhd_pub_t
*dhd
)
13562 sssr_reg_info_cmn_t
*sssr_reg_info_cmn
= dhd
->sssr_reg_info
;
13563 sssr_reg_info_v3_t
*sssr_reg_info
= (sssr_reg_info_v3_t
*)&sssr_reg_info_cmn
->rev3
;
13565 /* HWA wrapper registers */
13566 uint32 ioctrl
, resetctrl
;
13567 /* HWA base registers */
13568 uint32 clkenable
, clkgatingenable
, clkext
, clkctlstatus
;
13569 uint32 hwa_resetseq_val
[SSSR_HWA_RESET_SEQ_STEPS
];
13572 if (sssr_reg_info
->version
< SSSR_REG_INFO_VER_3
) {
13573 DHD_ERROR(("%s: not supported for version:%d\n",
13574 __FUNCTION__
, sssr_reg_info
->version
));
13575 return BCME_UNSUPPORTED
;
13578 if (sssr_reg_info
->hwa_regs
.base_regs
.clkenable
== 0) {
13579 DHD_ERROR(("%s: hwa regs are not set\n", __FUNCTION__
));
13580 return BCME_UNSUPPORTED
;
13583 DHD_ERROR(("%s: version:%d\n", __FUNCTION__
, sssr_reg_info
->version
));
13585 ioctrl
= sssr_reg_info
->hwa_regs
.wrapper_regs
.ioctrl
;
13586 resetctrl
= sssr_reg_info
->hwa_regs
.wrapper_regs
.resetctrl
;
13588 clkenable
= sssr_reg_info
->hwa_regs
.base_regs
.clkenable
;
13589 clkgatingenable
= sssr_reg_info
->hwa_regs
.base_regs
.clkgatingenable
;
13590 clkext
= sssr_reg_info
->hwa_regs
.base_regs
.clkext
;
13591 clkctlstatus
= sssr_reg_info
->hwa_regs
.base_regs
.clkctlstatus
;
13593 ret
= memcpy_s(hwa_resetseq_val
, sizeof(hwa_resetseq_val
),
13594 sssr_reg_info
->hwa_regs
.hwa_resetseq_val
,
13595 sizeof(sssr_reg_info
->hwa_regs
.hwa_resetseq_val
));
13597 DHD_ERROR(("%s: hwa_resetseq_val memcpy_s failed: %d\n",
13598 __FUNCTION__
, ret
));
13602 dhd_sbreg_op(dhd
, ioctrl
, &hwa_resetseq_val
[i
++], FALSE
);
13603 dhd_sbreg_op(dhd
, resetctrl
, &hwa_resetseq_val
[i
++], FALSE
);
13604 dhd_sbreg_op(dhd
, resetctrl
, &hwa_resetseq_val
[i
++], FALSE
);
13605 dhd_sbreg_op(dhd
, ioctrl
, &hwa_resetseq_val
[i
++], FALSE
);
13607 dhd_sbreg_op(dhd
, clkenable
, &hwa_resetseq_val
[i
++], FALSE
);
13608 dhd_sbreg_op(dhd
, clkgatingenable
, &hwa_resetseq_val
[i
++], FALSE
);
13609 dhd_sbreg_op(dhd
, clkext
, &hwa_resetseq_val
[i
++], FALSE
);
13610 dhd_sbreg_op(dhd
, clkctlstatus
, &hwa_resetseq_val
[i
++], FALSE
);
13616 dhdpcie_fis_dump(dhd_pub_t
*dhd
)
13619 uint8 num_d11cores
;
13621 DHD_ERROR(("%s\n", __FUNCTION__
));
13623 if (!dhd
->sssr_inited
) {
13624 DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__
));
13628 if (dhd
->bus
->is_linkdown
) {
13629 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__
));
13633 /* bring up all pmu resources */
13634 PMU_REG(dhd
->bus
->sih
, min_res_mask
, ~0,
13635 PMU_REG(dhd
->bus
->sih
, max_res_mask
, 0, 0));
13636 OSL_DELAY(10 * 1000);
13638 num_d11cores
= dhd_d11_slices_num_get(dhd
);
13640 for (i
= 0; i
< num_d11cores
; i
++) {
13641 dhd
->sssr_d11_outofreset
[i
] = TRUE
;
13644 dhdpcie_bring_d11_outofreset(dhd
);
13647 /* clear FIS Done */
13648 PMU_REG(dhd
->bus
->sih
, fis_ctrl_status
, PMU_CLEAR_FIS_DONE_MASK
, PMU_CLEAR_FIS_DONE_MASK
);
13650 if (dhdpcie_reset_hwa(dhd
) != BCME_OK
) {
13651 DHD_ERROR(("%s: dhdpcie_reset_hwa failed\n", __FUNCTION__
));
13655 dhdpcie_d11_check_outofreset(dhd
);
13657 DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__
));
13658 if (dhdpcie_sssr_dump_get_after_sr(dhd
) != BCME_OK
) {
13659 DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__
));
13662 dhd
->sssr_dump_collected
= TRUE
;
13663 dhd_write_sssr_dump(dhd
, SSSR_DUMP_MODE_FIS
);
13669 dhd_bus_fis_dump(dhd_pub_t
*dhd
)
13671 return dhdpcie_fis_dump(dhd
);
13673 #endif /* DHD_SSSR_DUMP */
13675 #ifdef DHD_SDTC_ETB_DUMP
13677 dhd_bus_get_etb_info(dhd_pub_t
*dhd
, uint32 etbinfo_addr
, etb_info_t
*etb_info
)
13682 if ((ret
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
, etbinfo_addr
,
13683 (unsigned char *)etb_info
, sizeof(*etb_info
)))) {
13684 DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__
, ret
));
13692 dhd_bus_get_sdtc_etb(dhd_pub_t
*dhd
, uint8
*sdtc_etb_mempool
, uint addr
, uint read_bytes
)
13696 if ((ret
= dhdpcie_bus_membytes(dhd
->bus
, FALSE
, addr
,
13697 (unsigned char *)sdtc_etb_mempool
, read_bytes
))) {
13698 DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__
, ret
));
13703 #endif /* DHD_SDTC_ETB_DUMP */
13705 #ifdef DHD_WAKE_STATUS
13707 dhd_bus_get_wakecount(dhd_pub_t
*dhd
)
13709 return &dhd
->bus
->wake_counts
;
13712 dhd_bus_get_bus_wake(dhd_pub_t
*dhd
)
13714 return bcmpcie_set_get_wake(dhd
->bus
, 0);
13716 #endif /* DHD_WAKE_STATUS */
13718 /* Writes random number(s) to the TCM. FW upon initialization reads this register
13719 * to fetch the random number, and uses it to randomize heap address space layout.
13722 dhdpcie_wrt_rnd(struct dhd_bus
*bus
)
13724 bcm_rand_metadata_t rnd_data
;
13725 uint8 rand_buf
[BCM_ENTROPY_HOST_NBYTES
];
13726 uint32 count
= BCM_ENTROPY_HOST_NBYTES
;
13728 uint32 addr
= bus
->dongle_ram_base
+ (bus
->ramsize
- BCM_NVRAM_OFFSET_TCM
) -
13729 ((bus
->nvram_csm
& 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR
+ sizeof(rnd_data
));
13731 memset(rand_buf
, 0, BCM_ENTROPY_HOST_NBYTES
);
13732 rnd_data
.signature
= htol32(BCM_NVRAM_RNG_SIGNATURE
);
13733 rnd_data
.count
= htol32(count
);
13734 /* write the metadata about random number */
13735 dhdpcie_bus_membytes(bus
, TRUE
, addr
, (uint8
*)&rnd_data
, sizeof(rnd_data
));
13736 /* scale back by number of random number counts */
13739 bus
->ramtop_addr
= addr
;
13741 /* Now write the random number(s) */
13742 ret
= dhd_get_random_bytes(rand_buf
, count
);
13743 if (ret
!= BCME_OK
) {
13746 dhdpcie_bus_membytes(bus
, TRUE
, addr
, rand_buf
, count
);
13748 bus
->next_tlv
= addr
;
13754 dhd_pcie_intr_count_dump(dhd_pub_t
*dhd
)
13756 struct dhd_bus
*bus
= dhd
->bus
;
13757 uint64 current_time
;
13759 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
13760 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
13761 bus
->resume_intr_enable_count
, bus
->dpc_intr_enable_count
));
13762 DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
13763 bus
->isr_intr_disable_count
, bus
->suspend_intr_disable_count
));
13764 #ifdef BCMPCIE_OOB_HOST_WAKE
13765 DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
13766 bus
->oob_intr_count
, bus
->oob_intr_enable_count
,
13767 bus
->oob_intr_disable_count
));
13768 DHD_ERROR(("oob_irq_num=%d last_oob_irq_times="SEC_USEC_FMT
":"SEC_USEC_FMT
"\n",
13769 dhdpcie_get_oob_irq_num(bus
),
13770 GET_SEC_USEC(bus
->last_oob_irq_isr_time
),
13771 GET_SEC_USEC(bus
->last_oob_irq_thr_time
)));
13772 DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
13773 " last_oob_irq_disable_time="SEC_USEC_FMT
"\n",
13774 GET_SEC_USEC(bus
->last_oob_irq_enable_time
),
13775 GET_SEC_USEC(bus
->last_oob_irq_disable_time
)));
13776 DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
13777 dhdpcie_get_oob_irq_status(bus
),
13778 dhdpcie_get_oob_irq_level()));
13779 #endif /* BCMPCIE_OOB_HOST_WAKE */
13780 DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
13781 bus
->dpc_return_busdown_count
, bus
->non_ours_irq_count
));
13783 current_time
= OSL_LOCALTIME_NS();
13784 DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT
"\n",
13785 GET_SEC_USEC(current_time
)));
13786 DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
13787 " isr_exit_time="SEC_USEC_FMT
"\n",
13788 GET_SEC_USEC(bus
->isr_entry_time
),
13789 GET_SEC_USEC(bus
->isr_exit_time
)));
13790 DHD_ERROR(("isr_sched_dpc_time="SEC_USEC_FMT
13791 " rpm_sched_dpc_time="SEC_USEC_FMT
13792 " last_non_ours_irq_time="SEC_USEC_FMT
"\n",
13793 GET_SEC_USEC(bus
->isr_sched_dpc_time
),
13794 GET_SEC_USEC(bus
->rpm_sched_dpc_time
),
13795 GET_SEC_USEC(bus
->last_non_ours_irq_time
)));
13796 DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
13797 " last_process_ctrlbuf_time="SEC_USEC_FMT
"\n",
13798 GET_SEC_USEC(bus
->dpc_entry_time
),
13799 GET_SEC_USEC(bus
->last_process_ctrlbuf_time
)));
13800 DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
13801 " last_process_txcpl_time="SEC_USEC_FMT
"\n",
13802 GET_SEC_USEC(bus
->last_process_flowring_time
),
13803 GET_SEC_USEC(bus
->last_process_txcpl_time
)));
13804 DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
13805 " last_process_infocpl_time="SEC_USEC_FMT
13806 " last_process_edl_time="SEC_USEC_FMT
"\n",
13807 GET_SEC_USEC(bus
->last_process_rxcpl_time
),
13808 GET_SEC_USEC(bus
->last_process_infocpl_time
),
13809 GET_SEC_USEC(bus
->last_process_edl_time
)));
13810 DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
13811 " resched_dpc_time="SEC_USEC_FMT
"\n",
13812 GET_SEC_USEC(bus
->dpc_exit_time
),
13813 GET_SEC_USEC(bus
->resched_dpc_time
)));
13814 DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT
"\n",
13815 GET_SEC_USEC(bus
->last_d3_inform_time
)));
13817 DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
13818 " last_suspend_end_time="SEC_USEC_FMT
"\n",
13819 GET_SEC_USEC(bus
->last_suspend_start_time
),
13820 GET_SEC_USEC(bus
->last_suspend_end_time
)));
13821 DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
13822 " last_resume_end_time="SEC_USEC_FMT
"\n",
13823 GET_SEC_USEC(bus
->last_resume_start_time
),
13824 GET_SEC_USEC(bus
->last_resume_end_time
)));
13826 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
13827 DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
13828 " logtrace_thread_sem_down_time="SEC_USEC_FMT
13829 "\nlogtrace_thread_flush_time="SEC_USEC_FMT
13830 " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
13831 "\nlogtrace_thread_complete_time="SEC_USEC_FMT
"\n",
13832 GET_SEC_USEC(dhd
->logtrace_thr_ts
.entry_time
),
13833 GET_SEC_USEC(dhd
->logtrace_thr_ts
.sem_down_time
),
13834 GET_SEC_USEC(dhd
->logtrace_thr_ts
.flush_time
),
13835 GET_SEC_USEC(dhd
->logtrace_thr_ts
.unexpected_break_time
),
13836 GET_SEC_USEC(dhd
->logtrace_thr_ts
.complete_time
)));
13837 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
13841 dhd_bus_intr_count_dump(dhd_pub_t
*dhd
)
13843 dhd_pcie_intr_count_dump(dhd
);
13847 dhd_pcie_dump_wrapper_regs(dhd_pub_t
*dhd
)
13849 uint32 save_idx
, val
;
13850 si_t
*sih
= dhd
->bus
->sih
;
13851 uint32 oob_base
, oob_base1
;
13852 uint32 wrapper_dump_list
[] = {
13853 AI_OOBSELOUTA30
, AI_OOBSELOUTA74
, AI_OOBSELOUTB30
, AI_OOBSELOUTB74
,
13854 AI_OOBSELOUTC30
, AI_OOBSELOUTC74
, AI_OOBSELOUTD30
, AI_OOBSELOUTD74
,
13855 AI_RESETSTATUS
, AI_RESETCTRL
,
13856 AI_ITIPOOBA
, AI_ITIPOOBB
, AI_ITIPOOBC
, AI_ITIPOOBD
,
13857 AI_ITIPOOBAOUT
, AI_ITIPOOBBOUT
, AI_ITIPOOBCOUT
, AI_ITIPOOBDOUT
13860 hndoobr_reg_t
*reg
;
13861 cr4regs_t
*cr4regs
;
13862 ca7regs_t
*ca7regs
;
13864 save_idx
= si_coreidx(sih
);
13866 DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__
));
13868 if (si_setcore(sih
, PCIE2_CORE_ID
, 0) != NULL
) {
13869 for (i
= 0; i
< (uint32
)sizeof(wrapper_dump_list
) / 4; i
++) {
13870 val
= si_wrapperreg(sih
, wrapper_dump_list
[i
], 0, 0);
13871 DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list
[i
], val
));
13875 if ((cr4regs
= si_setcore(sih
, ARMCR4_CORE_ID
, 0)) != NULL
) {
13876 DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__
));
13877 for (i
= 0; i
< (uint32
)sizeof(wrapper_dump_list
) / 4; i
++) {
13878 val
= si_wrapperreg(sih
, wrapper_dump_list
[i
], 0, 0);
13879 DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list
[i
], val
));
13881 DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__
));
13882 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, corecontrol
));
13883 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, corecontrol
), val
));
13884 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, corecapabilities
));
13885 DHD_ERROR(("reg:0x%x val:0x%x\n",
13886 (uint
)OFFSETOF(cr4regs_t
, corecapabilities
), val
));
13887 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, corestatus
));
13888 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, corestatus
), val
));
13889 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, nmiisrst
));
13890 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, nmiisrst
), val
));
13891 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, nmimask
));
13892 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, nmimask
), val
));
13893 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, isrmask
));
13894 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, isrmask
), val
));
13895 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, swintreg
));
13896 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, swintreg
), val
));
13897 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, intstatus
));
13898 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, intstatus
), val
));
13899 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, cyclecnt
));
13900 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, cyclecnt
), val
));
13901 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, inttimer
));
13902 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, inttimer
), val
));
13903 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, clk_ctl_st
));
13904 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, clk_ctl_st
), val
));
13905 val
= R_REG(dhd
->osh
, ARM_CR4_REG(cr4regs
, powerctl
));
13906 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(cr4regs_t
, powerctl
), val
));
13908 /* XXX: Currently dumping CA7 registers causing CTO, temporarily disabling it */
13909 BCM_REFERENCE(ca7regs
);
13911 if ((ca7regs
= si_setcore(sih
, ARMCA7_CORE_ID
, 0)) != NULL
) {
13912 DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__
));
13913 val
= R_REG(dhd
->osh
, ARM_CA7_REG(ca7regs
, corecontrol
));
13914 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(ca7regs_t
, corecontrol
), val
));
13915 val
= R_REG(dhd
->osh
, ARM_CA7_REG(ca7regs
, corecapabilities
));
13916 DHD_ERROR(("reg:0x%x val:0x%x\n",
13917 (uint
)OFFSETOF(ca7regs_t
, corecapabilities
), val
));
13918 val
= R_REG(dhd
->osh
, ARM_CA7_REG(ca7regs
, corestatus
));
13919 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(ca7regs_t
, corestatus
), val
));
13920 val
= R_REG(dhd
->osh
, ARM_CA7_REG(ca7regs
, tracecontrol
));
13921 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(ca7regs_t
, tracecontrol
), val
));
13922 val
= R_REG(dhd
->osh
, ARM_CA7_REG(ca7regs
, clk_ctl_st
));
13923 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(ca7regs_t
, clk_ctl_st
), val
));
13924 val
= R_REG(dhd
->osh
, ARM_CA7_REG(ca7regs
, powerctl
));
13925 DHD_ERROR(("reg:0x%x val:0x%x\n", (uint
)OFFSETOF(ca7regs_t
, powerctl
), val
));
13927 #endif /* NOT_YET */
13929 DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__
));
13931 oob_base
= si_oobr_baseaddr(sih
, FALSE
);
13932 oob_base1
= si_oobr_baseaddr(sih
, TRUE
);
13934 dhd_sbreg_op(dhd
, oob_base
+ OOB_STATUSA
, &val
, TRUE
);
13935 dhd_sbreg_op(dhd
, oob_base
+ OOB_STATUSB
, &val
, TRUE
);
13936 dhd_sbreg_op(dhd
, oob_base
+ OOB_STATUSC
, &val
, TRUE
);
13937 dhd_sbreg_op(dhd
, oob_base
+ OOB_STATUSD
, &val
, TRUE
);
13938 } else if ((reg
= si_setcore(sih
, HND_OOBR_CORE_ID
, 0)) != NULL
) {
13939 val
= R_REG(dhd
->osh
, ®
->intstatus
[0]);
13940 DHD_ERROR(("reg: addr:%p val:0x%x\n", reg
, val
));
13941 val
= R_REG(dhd
->osh
, ®
->intstatus
[1]);
13942 DHD_ERROR(("reg: addr:%p val:0x%x\n", reg
, val
));
13943 val
= R_REG(dhd
->osh
, ®
->intstatus
[2]);
13944 DHD_ERROR(("reg: addr:%p val:0x%x\n", reg
, val
));
13945 val
= R_REG(dhd
->osh
, ®
->intstatus
[3]);
13946 DHD_ERROR(("reg: addr:%p val:0x%x\n", reg
, val
));
13950 DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__
));
13952 dhd_sbreg_op(dhd
, oob_base1
+ OOB_STATUSA
, &val
, TRUE
);
13953 dhd_sbreg_op(dhd
, oob_base1
+ OOB_STATUSB
, &val
, TRUE
);
13954 dhd_sbreg_op(dhd
, oob_base1
+ OOB_STATUSC
, &val
, TRUE
);
13955 dhd_sbreg_op(dhd
, oob_base1
+ OOB_STATUSD
, &val
, TRUE
);
13958 si_setcoreidx(dhd
->bus
->sih
, save_idx
);
13964 dhdpcie_hw_war_regdump(dhd_bus_t
*bus
)
13966 uint32 save_idx
, val
;
13967 volatile uint32
*reg
;
13969 save_idx
= si_coreidx(bus
->sih
);
13970 if ((reg
= si_setcore(bus
->sih
, CC_CORE_ID
, 0)) != NULL
) {
13971 val
= R_REG(bus
->osh
, reg
+ REG_WORK_AROUND
);
13972 DHD_ERROR(("CC HW_WAR :0x%x\n", val
));
13975 if ((reg
= si_setcore(bus
->sih
, ARMCR4_CORE_ID
, 0)) != NULL
) {
13976 val
= R_REG(bus
->osh
, reg
+ REG_WORK_AROUND
);
13977 DHD_ERROR(("ARM HW_WAR:0x%x\n", val
));
13980 if ((reg
= si_setcore(bus
->sih
, PCIE2_CORE_ID
, 0)) != NULL
) {
13981 val
= R_REG(bus
->osh
, reg
+ REG_WORK_AROUND
);
13982 DHD_ERROR(("PCIE HW_WAR :0x%x\n", val
));
13984 si_setcoreidx(bus
->sih
, save_idx
);
13986 val
= PMU_REG_NEW(bus
->sih
, min_res_mask
, 0, 0);
13987 DHD_ERROR(("MINRESMASK :0x%x\n", val
));
13991 dhd_pcie_dma_info_dump(dhd_pub_t
*dhd
)
13993 if (dhd
->bus
->is_linkdown
) {
13994 DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
13995 "due to PCIe link down ------- \r\n"));
13999 DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
14002 DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
14003 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x200, 0, 0),
14004 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x204, 0, 0)));
14005 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
14006 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x208, 0, 0),
14007 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x20C, 0, 0)));
14008 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
14009 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x210, 0, 0),
14010 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x214, 0, 0)));
14012 DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
14013 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x220, 0, 0),
14014 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x224, 0, 0)));
14015 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
14016 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x228, 0, 0),
14017 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x22C, 0, 0)));
14018 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
14019 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x230, 0, 0),
14020 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x234, 0, 0)));
14023 DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
14024 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x240, 0, 0),
14025 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x244, 0, 0)));
14026 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
14027 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x248, 0, 0),
14028 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x24C, 0, 0)));
14029 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
14030 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x250, 0, 0),
14031 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x254, 0, 0)));
14033 DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
14034 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x260, 0, 0),
14035 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x264, 0, 0)));
14036 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
14037 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x268, 0, 0),
14038 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x26C, 0, 0)));
14039 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
14040 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x270, 0, 0),
14041 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
, 0x274, 0, 0)));
14047 dhd_pcie_dump_int_regs(dhd_pub_t
*dhd
)
14049 uint32 intstatus
= 0;
14050 uint32 intmask
= 0;
14051 uint32 d2h_db0
= 0;
14052 uint32 d2h_mb_data
= 0;
14054 DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
14055 intstatus
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14056 dhd
->bus
->pcie_mailbox_int
, 0, 0);
14057 if (intstatus
== (uint32
)-1) {
14058 DHD_ERROR(("intstatus=0x%x \n", intstatus
));
14062 intmask
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14063 dhd
->bus
->pcie_mailbox_mask
, 0, 0);
14064 if (intmask
== (uint32
) -1) {
14065 DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus
, intmask
));
14069 d2h_db0
= si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14070 PCID2H_MailBox
, 0, 0);
14071 if (d2h_db0
== (uint32
)-1) {
14072 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
14073 intstatus
, intmask
, d2h_db0
));
14077 DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
14078 intstatus
, intmask
, d2h_db0
));
14079 dhd_bus_cmn_readshared(dhd
->bus
, &d2h_mb_data
, D2H_MB_DATA
, 0);
14080 DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data
,
14081 dhd
->bus
->def_intmask
));
14087 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t
*dhd
)
14089 DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
14090 DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
14091 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
14092 PCIE_EXTCAP_AER_UCERR_OFFSET
, TRUE
, FALSE
, 0)));
14093 #ifdef EXTENDED_PCIE_DEBUG_DUMP
14094 DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
14095 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
14096 PCIE_EXTCAP_ERR_HEADER_LOG_0
, TRUE
, FALSE
, 0),
14097 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
14098 PCIE_EXTCAP_ERR_HEADER_LOG_1
, TRUE
, FALSE
, 0),
14099 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
14100 PCIE_EXTCAP_ERR_HEADER_LOG_2
, TRUE
, FALSE
, 0),
14101 dhdpcie_rc_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
14102 PCIE_EXTCAP_ERR_HEADER_LOG_3
, TRUE
, FALSE
, 0)));
14103 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
#define MAX_RC_REG_INFO_VAL 8
#define PCIE_EXTCAP_ERR_HD_SZ 4
/**
 * Append PCIe Root Complex register snapshots to the vendor hang-event
 * info string (dhd->hang_info) when a link-down is being reported.
 *
 * Each value is formatted as "%08x" followed by HANG_KEY_DEL, and
 * dhd->hang_info_cnt is bumped per field, capped at HANG_FIELD_CNT_MAX.
 * The AER header log words are appended in RAW form (HANG_RAW_DEL
 * prefixed) without counting against the field cap per word.
 *
 * @param dhd            driver public context
 * @param bytes_written  in/out: current length of dhd->hang_info;
 *                       advanced by every scnprintf() here.
 */
static void
dhd_dump_pcie_rc_regs_for_linkdown(dhd_pub_t *dhd, int *bytes_written)
{
	int i;
	int remain_len;

	/* dump link control & status */
	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
			dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
			PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
		dhd->hang_info_cnt++;
	}

	/* dump device control & status */
	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
			dhdpcie_rc_access_cap(dhd->bus, PCIE_CAP_ID_EXP,
			PCIE_CAP_DEVCTRL_OFFSET, FALSE, FALSE, 0), HANG_KEY_DEL);
		dhd->hang_info_cnt++;
	}

	/* dump uncorrectable error */
	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
			dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
			PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0), HANG_KEY_DEL);
		dhd->hang_info_cnt++;
	}

	/* dump correctable error */
	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
			dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
			/* XXX: use definition in linux/pcie_regs.h */
			PCI_ERR_COR_STATUS, TRUE, FALSE, 0), HANG_KEY_DEL);
		dhd->hang_info_cnt++;
	}

	/* HG05/06 reserved */
	/* NOTE(review): the reserved-field value was lost in the mangled
	 * source; reconstructed as 0 — confirm against upstream dhd_pcie.c.
	 */
	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
			0, HANG_KEY_DEL);
		dhd->hang_info_cnt++;
	}

	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len, "%08x%c",
			0, HANG_KEY_DEL);
		dhd->hang_info_cnt++;
	}

	/* dump error header log in RAW */
	for (i = 0; i < PCIE_EXTCAP_ERR_HD_SZ; i++) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dhd->hang_info[*bytes_written], remain_len,
			"%c%08x", HANG_RAW_DEL, dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
			PCIE_EXTCAP_ERR_HEADER_LOG_0 + i * PCIE_EXTCAP_ERR_HD_SZ,
			TRUE, FALSE, 0));
	}
	dhd->hang_info_cnt++;
}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
14180 dhd_pcie_debug_info_dump(dhd_pub_t
*dhd
)
14182 int host_irq_disabled
;
14184 DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd
->bus
->bus_low_power_state
));
14185 host_irq_disabled
= dhdpcie_irq_disabled(dhd
->bus
);
14186 DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled
));
14187 dhd_print_tasklet_status(dhd
);
14188 dhd_pcie_intr_count_dump(dhd
);
14190 DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
14191 dhdpcie_dump_resource(dhd
->bus
);
14193 dhd_pcie_dump_rc_conf_space_cap(dhd
);
14195 DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
14196 dhd_debug_get_rc_linkcap(dhd
->bus
)));
14197 #ifdef CUSTOMER_HW4_DEBUG
14198 if (dhd
->bus
->is_linkdown
) {
14199 DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
14200 "link may be DOWN\n"));
14203 #endif /* CUSTOMER_HW4_DEBUG */
14204 DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
14205 /* XXX: hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
14206 dhd_bus_dump_imp_cfg_registers(dhd
->bus
);
14207 #ifdef EXTENDED_PCIE_DEBUG_DUMP
14208 DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
14209 dhdpcie_ep_access_cap(dhd
->bus
, PCIE_EXTCAP_ID_ERR
,
14210 PCIE_EXTCAP_AER_UCERR_OFFSET
, TRUE
, FALSE
, 0)));
14211 DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
14212 "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1
,
14213 dhd_pcie_config_read(dhd
->bus
, PCI_TLP_HDR_LOG1
, sizeof(uint32
)),
14215 dhd_pcie_config_read(dhd
->bus
, PCI_TLP_HDR_LOG2
, sizeof(uint32
)),
14217 dhd_pcie_config_read(dhd
->bus
, PCI_TLP_HDR_LOG3
, sizeof(uint32
)),
14219 dhd_pcie_config_read(dhd
->bus
, PCI_TLP_HDR_LOG4
, sizeof(uint32
))));
14220 if (dhd
->bus
->sih
->buscorerev
>= 24) {
14221 DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
14222 "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL
,
14223 dhd_pcie_config_read(dhd
->bus
, PCIECFGREG_DEV_STATUS_CTRL
,
14224 sizeof(uint32
)), PCIE_CFG_SUBSYSTEM_CONTROL
,
14225 dhd_pcie_config_read(dhd
->bus
, PCIE_CFG_SUBSYSTEM_CONTROL
,
14226 sizeof(uint32
)), PCIECFGREG_PML1_SUB_CTRL2
,
14227 dhd_pcie_config_read(dhd
->bus
, PCIECFGREG_PML1_SUB_CTRL2
,
14229 dhd_bus_dump_dar_registers(dhd
->bus
);
14231 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
14233 if (dhd
->bus
->is_linkdown
) {
14234 DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
14238 if (MULTIBP_ENAB(dhd
->bus
->sih
)) {
14239 dhd_bus_pcie_pwr_req(dhd
->bus
);
14242 DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
14243 /* XXX: hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
14244 * CurrentPcieGen2ProgramGuide/pcie_ep.htm
14247 DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
14248 "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0
,
14249 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ0
),
14250 PCIECFGREG_PHY_DBG_CLKREQ1
,
14251 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ1
),
14252 PCIECFGREG_PHY_DBG_CLKREQ2
,
14253 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ2
),
14254 PCIECFGREG_PHY_DBG_CLKREQ3
,
14255 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_DBG_CLKREQ3
)));
14257 #ifdef EXTENDED_PCIE_DEBUG_DUMP
14258 if (dhd
->bus
->sih
->buscorerev
>= 24) {
14260 DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
14261 "ltssm_hist_2(0x%x)=0x%x "
14262 "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0
,
14263 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_0
),
14264 PCIECFGREG_PHY_LTSSM_HIST_1
,
14265 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_1
),
14266 PCIECFGREG_PHY_LTSSM_HIST_2
,
14267 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_2
),
14268 PCIECFGREG_PHY_LTSSM_HIST_3
,
14269 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_PHY_LTSSM_HIST_3
)));
14271 DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
14273 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_TREFUP
),
14274 PCIECFGREG_TREFUP_EXT
,
14275 dhd_pcie_corereg_read(dhd
->bus
->sih
, PCIECFGREG_TREFUP_EXT
)));
14276 DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
14277 "Function_Intstatus(0x%x)=0x%x "
14278 "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
14279 "Power_Intmask(0x%x)=0x%x\n",
14280 PCIE_CORE_REG_ERRLOG
,
14281 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14282 PCIE_CORE_REG_ERRLOG
, 0, 0),
14283 PCIE_CORE_REG_ERR_ADDR
,
14284 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14285 PCIE_CORE_REG_ERR_ADDR
, 0, 0),
14286 PCIFunctionIntstatus(dhd
->bus
->sih
->buscorerev
),
14287 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14288 PCIFunctionIntstatus(dhd
->bus
->sih
->buscorerev
), 0, 0),
14289 PCIFunctionIntmask(dhd
->bus
->sih
->buscorerev
),
14290 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14291 PCIFunctionIntmask(dhd
->bus
->sih
->buscorerev
), 0, 0),
14292 PCIPowerIntstatus(dhd
->bus
->sih
->buscorerev
),
14293 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14294 PCIPowerIntstatus(dhd
->bus
->sih
->buscorerev
), 0, 0),
14295 PCIPowerIntmask(dhd
->bus
->sih
->buscorerev
),
14296 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14297 PCIPowerIntmask(dhd
->bus
->sih
->buscorerev
), 0, 0)));
14298 DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
14299 "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
14300 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg1
),
14301 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14302 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg1
), 0, 0),
14303 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg2
),
14304 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14305 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg2
), 0, 0),
14306 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg3
),
14307 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14308 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg3
), 0, 0),
14309 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg4
),
14310 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14311 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_hdr_logreg4
), 0, 0)));
14312 DHD_ERROR(("err_code(0x%x)=0x%x\n",
14313 (uint
)OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_code_logreg
),
14314 si_corereg(dhd
->bus
->sih
, dhd
->bus
->sih
->buscoreidx
,
14315 OFFSETOF(sbpcieregs_t
, u
.pcie2
.err_code_logreg
), 0, 0)));
14317 dhd_pcie_dump_wrapper_regs(dhd
);
14318 dhdpcie_hw_war_regdump(dhd
->bus
);
14320 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
14322 dhd_pcie_dma_info_dump(dhd
);
14324 if (MULTIBP_ENAB(dhd
->bus
->sih
)) {
14325 dhd_bus_pcie_pwr_req_clear(dhd
->bus
);
14332 dhd_bus_force_bt_quiesce_enabled(struct dhd_bus
*bus
)
14334 return bus
->force_bt_quiesce
;
14338 dhd_d11_slices_num_get(dhd_pub_t
*dhdp
)
14340 return si_scan_core_present(dhdp
->bus
->sih
) ?
14341 MAX_NUM_D11_CORES_WITH_SCAN
: MAX_NUM_D11CORES
;
14345 dhd_bus_tcm_test(struct dhd_bus
*bus
)
14348 int size
; /* Full mem size */
14349 int start
; /* Start address */
14350 int read_size
= 0; /* Read size of each iteration */
14352 uint8
*read_buf
, *write_buf
;
14353 uint8 init_val
[NUM_PATTERNS
] = {
14354 0xFFu
, /* 11111111 */
14355 0x00u
, /* 00000000 */
14359 DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__
));
14363 read_buf
= MALLOCZ(bus
->dhd
->osh
, MEMBLOCK
);
14366 DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__
));
14370 write_buf
= MALLOCZ(bus
->dhd
->osh
, MEMBLOCK
);
14373 MFREE(bus
->dhd
->osh
, read_buf
, MEMBLOCK
);
14374 DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__
));
14378 DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__
, bus
->dongle_ram_base
, bus
->ramsize
));
14379 DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__
, MEMBLOCK
, NUM_PATTERNS
));
14381 while (num
< NUM_PATTERNS
) {
14382 start
= bus
->dongle_ram_base
;
14383 /* Get full mem size */
14384 size
= bus
->ramsize
;
14386 memset(write_buf
, init_val
[num
], MEMBLOCK
);
14388 read_size
= MIN(MEMBLOCK
, size
);
14389 memset(read_buf
, 0, read_size
);
14392 if ((ret
= dhdpcie_bus_membytes(bus
, TRUE
, start
, write_buf
, read_size
))) {
14393 DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__
, ret
));
14394 MFREE(bus
->dhd
->osh
, read_buf
, MEMBLOCK
);
14395 MFREE(bus
->dhd
->osh
, write_buf
, MEMBLOCK
);
14400 if ((ret
= dhdpcie_bus_membytes(bus
, FALSE
, start
, read_buf
, read_size
))) {
14401 DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__
, ret
));
14402 MFREE(bus
->dhd
->osh
, read_buf
, MEMBLOCK
);
14403 MFREE(bus
->dhd
->osh
, write_buf
, MEMBLOCK
);
14408 if (memcmp(read_buf
, write_buf
, read_size
)) {
14409 DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
14410 __FUNCTION__
, start
, num
));
14411 prhex("Readbuf", read_buf
, read_size
);
14412 prhex("Writebuf", write_buf
, read_size
);
14413 MFREE(bus
->dhd
->osh
, read_buf
, MEMBLOCK
);
14414 MFREE(bus
->dhd
->osh
, write_buf
, MEMBLOCK
);
14418 /* Decrement size and increment start address */
14420 start
+= read_size
;
14425 MFREE(bus
->dhd
->osh
, read_buf
, MEMBLOCK
);
14426 MFREE(bus
->dhd
->osh
, write_buf
, MEMBLOCK
);
14428 DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__
, num
));
14432 #define PCI_CFG_LINK_SPEED_SHIFT 16
14434 dhd_get_pcie_linkspeed(dhd_pub_t
*dhd
)
14437 uint32 pcie_lnkspeed
;
14438 pcie_lnkst
= OSL_PCI_READ_CONFIG(dhd
->osh
, PCIECFGREG_LINK_STATUS_CTRL
,
14439 sizeof(pcie_lnkst
));
14441 pcie_lnkspeed
= (pcie_lnkst
>> PCI_CFG_LINK_SPEED_SHIFT
) & PCI_LINK_SPEED_MASK
;
14442 DHD_INFO(("%s: Link speed: %d\n", __FUNCTION__
, pcie_lnkspeed
));
14443 return pcie_lnkspeed
;